/*
 * Performance events core code:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

12#include <linux/fs.h>
13#include <linux/mm.h>
14#include <linux/cpu.h>
15#include <linux/smp.h>
16#include <linux/idr.h>
17#include <linux/file.h>
18#include <linux/poll.h>
19#include <linux/slab.h>
20#include <linux/hash.h>
21#include <linux/tick.h>
22#include <linux/sysfs.h>
23#include <linux/dcache.h>
24#include <linux/percpu.h>
25#include <linux/ptrace.h>
26#include <linux/reboot.h>
27#include <linux/vmstat.h>
28#include <linux/device.h>
29#include <linux/export.h>
30#include <linux/vmalloc.h>
31#include <linux/hardirq.h>
32#include <linux/rculist.h>
33#include <linux/uaccess.h>
34#include <linux/syscalls.h>
35#include <linux/anon_inodes.h>
36#include <linux/kernel_stat.h>
37#include <linux/perf_event.h>
38#include <linux/ftrace_event.h>
39#include <linux/hw_breakpoint.h>
40#include <linux/mm_types.h>
41#include <linux/cgroup.h>
42
43#include "internal.h"
44
45#include <asm/irq_regs.h>
46
47struct remote_function_call {
48 struct task_struct *p;
49 int (*func)(void *info);
50 void *info;
51 int ret;
52};
53
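/*
 * Helper executed via smp_call_function_single(): if a target task was
 * given, only run the requested function when that task is currently
 * running on this CPU; otherwise report -EAGAIN so the caller can retry.
 */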
54static void remote_function(void *data)
55{
56 struct remote_function_call *tfc = data;
57 struct task_struct *p = tfc->p;
58
59 if (p) {
60 tfc->ret = -EAGAIN;
61 if (task_cpu(p) != smp_processor_id() || !task_curr(p))
62 return;
 }
64
65 tfc->ret = tfc->func(tfc->info);
66}

/**
 * task_function_call - call a function on the cpu on which a task runs
 * @p:		the task to evaluate
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func when the task is currently running. This might
 * be on the current CPU, which just calls the function directly.
 *
 * returns: @func return value, or
 *	    -ESRCH  - when the process isn't running
 *	    -EAGAIN - when the process moved away
 */
81static int
82task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
83{
84 struct remote_function_call data = {
85 .p = p,
86 .func = func,
87 .info = info,
88 .ret = -ESRCH,
89 };
90
91 if (task_curr(p))
92 smp_call_function_single(task_cpu(p), remote_function, &data, 1);
93
94 return data.ret;
95}

/**
 * cpu_function_call - call a function on a specific cpu
 * @cpu:	the cpu on which to call the function
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func on the remote cpu.
 *
 * returns: @func return value or -ENXIO when the cpu is offline
 */
106static int cpu_function_call(int cpu, int (*func) (void *info), void *info)
107{
108 struct remote_function_call data = {
109 .p = NULL,
110 .func = func,
111 .info = info,
112 .ret = -ENXIO,
113 };
114
115 smp_call_function_single(cpu, remote_function, &data, 1);
116
117 return data.ret;
118}
119
120#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
121 PERF_FLAG_FD_OUTPUT |\
122 PERF_FLAG_PID_CGROUP)

/*
 * branch priv levels that need permission checks
 */
127#define PERF_SAMPLE_BRANCH_PERM_PLM \
128 (PERF_SAMPLE_BRANCH_KERNEL |\
129 PERF_SAMPLE_BRANCH_HV)
130
131enum event_type_t {
132 EVENT_FLEXIBLE = 0x1,
133 EVENT_PINNED = 0x2,
134 EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
135};

/*
 * perf_sched_events : >0 events exist
 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
 */
141struct static_key_deferred perf_sched_events __read_mostly;
142static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
143static DEFINE_PER_CPU(atomic_t, perf_branch_stack_events);
144
145static atomic_t nr_mmap_events __read_mostly;
146static atomic_t nr_comm_events __read_mostly;
147static atomic_t nr_task_events __read_mostly;
148
149static LIST_HEAD(pmus);
150static DEFINE_MUTEX(pmus_lock);
151static struct srcu_struct pmus_srcu;

/*
 * perf event paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
160int sysctl_perf_event_paranoid __read_mostly = 1;
161
/* Minimum for 512 kiB + 1 user control page */
163int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024);

/*
 * max perf event sample rate
 */
168#define DEFAULT_MAX_SAMPLE_RATE 100000
169#define DEFAULT_SAMPLE_PERIOD_NS (NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE)
170#define DEFAULT_CPU_TIME_MAX_PERCENT 25
171
172int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
173
174static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
175static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS;
176
177static atomic_t perf_sample_allowed_ns __read_mostly =
178 ATOMIC_INIT( DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100);
179
180void update_perf_cpu_limits(void)
181{
182 u64 tmp = perf_sample_period_ns;
183
184 tmp *= sysctl_perf_cpu_time_max_percent;
185 do_div(tmp, 100);
186 atomic_set(&perf_sample_allowed_ns, tmp);
187}
188
189static int perf_rotate_context(struct perf_cpu_context *cpuctx);
190
191int perf_proc_update_handler(struct ctl_table *table, int write,
192 void __user *buffer, size_t *lenp,
193 loff_t *ppos)
194{
195 int ret = proc_dointvec(table, write, buffer, lenp, ppos);
196
197 if (ret || !write)
198 return ret;
199
200 max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
201 perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
202 update_perf_cpu_limits();
203
204 return 0;
205}
206
207int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;
208
209int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
210 void __user *buffer, size_t *lenp,
211 loff_t *ppos)
212{
213 int ret = proc_dointvec(table, write, buffer, lenp, ppos);
214
215 if (ret || !write)
216 return ret;
217
218 update_perf_cpu_limits();
219
220 return 0;
221}

/*
 * perf samples are done in some very critical code paths (NMIs).
 * If they take too much CPU time, the system can lock up and not
 * get any real work done.  This will drop the sample rate when
 * we detect that events are taking too long.
 */
229#define NR_ACCUMULATED_SAMPLES 128
230DEFINE_PER_CPU(u64, running_sample_length);
231
232void perf_sample_event_took(u64 sample_len_ns)
233{
234 u64 avg_local_sample_len;
235 u64 local_samples_len;
236
237 if (atomic_read(&perf_sample_allowed_ns) == 0)
238 return;
239
 /* decay the counter by 1 average sample */
241 local_samples_len = __get_cpu_var(running_sample_length);
242 local_samples_len -= local_samples_len/NR_ACCUMULATED_SAMPLES;
243 local_samples_len += sample_len_ns;
244 __get_cpu_var(running_sample_length) = local_samples_len;
245
 /*
 * Note: the average is biased artificially low until we have
 * accumulated NR_ACCUMULATED_SAMPLES samples, since the running
 * length starts from zero rather than a real average.
 */
251 avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;
252
253 if (avg_local_sample_len <= atomic_read(&perf_sample_allowed_ns))
254 return;
255
256 if (max_samples_per_tick <= 1)
257 return;
258
259 max_samples_per_tick = DIV_ROUND_UP(max_samples_per_tick, 2);
260 sysctl_perf_event_sample_rate = max_samples_per_tick * HZ;
261 perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
262
263 printk_ratelimited(KERN_WARNING
264 "perf samples too long (%lld > %d), lowering "
265 "kernel.perf_event_max_sample_rate to %d\n",
266 avg_local_sample_len,
267 atomic_read(&perf_sample_allowed_ns),
268 sysctl_perf_event_sample_rate);
269
270 update_perf_cpu_limits();
271}
272
273static atomic64_t perf_event_id;
274
275static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
276 enum event_type_t event_type);
277
278static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
279 enum event_type_t event_type,
280 struct task_struct *task);
281
282static void update_context_time(struct perf_event_context *ctx);
283static u64 perf_event_time(struct perf_event *event);
284
285void __weak perf_event_print_debug(void) { }
286
287extern __weak const char *perf_pmu_name(void)
288{
289 return "pmu";
290}
291
292static inline u64 perf_clock(void)
293{
294 return local_clock();
295}
296
297static inline struct perf_cpu_context *
298__get_cpu_context(struct perf_event_context *ctx)
299{
300 return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
301}
302
303static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
304 struct perf_event_context *ctx)
305{
306 raw_spin_lock(&cpuctx->ctx.lock);
307 if (ctx)
308 raw_spin_lock(&ctx->lock);
309}
310
311static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
312 struct perf_event_context *ctx)
313{
314 if (ctx)
315 raw_spin_unlock(&ctx->lock);
316 raw_spin_unlock(&cpuctx->ctx.lock);
317}
318
319#ifdef CONFIG_CGROUP_PERF

/*
 * perf_cgroup_info keeps track of time_enabled for a cgroup.
 * This is a per-cpu dynamically allocated data structure.
 */
325struct perf_cgroup_info {
326 u64 time;
327 u64 timestamp;
328};
329
330struct perf_cgroup {
331 struct cgroup_subsys_state css;
332 struct perf_cgroup_info __percpu *info;
333};
334

/*
 * Must ensure cgroup is pinned (css_get) before calling
 * this function. In other words, we cannot call this function
 * if there is no cgroup event for the current CPU context.
 */
340static inline struct perf_cgroup *
341perf_cgroup_from_task(struct task_struct *task)
342{
343 return container_of(task_subsys_state(task, perf_subsys_id),
344 struct perf_cgroup, css);
345}
346
347static inline bool
348perf_cgroup_match(struct perf_event *event)
349{
350 struct perf_event_context *ctx = event->ctx;
351 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
352
 /* @event doesn't care about cgroup */
354 if (!event->cgrp)
355 return true;
356
 /* wants specific cgroup scope but @cpuctx isn't associated with any */
358 if (!cpuctx->cgrp)
359 return false;
360
 /*
 * Cgroup scoping is recursive.  An event enabled for a cgroup is
 * also enabled for all its descendant cgroups.  If @cpuctx's
 * cgroup is a descendant of @event's (the test covers identity
 * case), it's a match.
 */
367 return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
368 event->cgrp->css.cgroup);
369}
370
371static inline bool perf_tryget_cgroup(struct perf_event *event)
372{
373 return css_tryget(&event->cgrp->css);
374}
375
376static inline void perf_put_cgroup(struct perf_event *event)
377{
378 css_put(&event->cgrp->css);
379}
380
381static inline void perf_detach_cgroup(struct perf_event *event)
382{
383 perf_put_cgroup(event);
384 event->cgrp = NULL;
385}
386
387static inline int is_cgroup_event(struct perf_event *event)
388{
389 return event->cgrp != NULL;
390}
391
392static inline u64 perf_cgroup_event_time(struct perf_event *event)
393{
394 struct perf_cgroup_info *t;
395
396 t = per_cpu_ptr(event->cgrp->info, event->cpu);
397 return t->time;
398}
399
400static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
401{
402 struct perf_cgroup_info *info;
403 u64 now;
404
405 now = perf_clock();
406
407 info = this_cpu_ptr(cgrp->info);
408
409 info->time += now - info->timestamp;
410 info->timestamp = now;
411}
412
413static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
414{
415 struct perf_cgroup *cgrp_out = cpuctx->cgrp;
416 if (cgrp_out)
417 __update_cgrp_time(cgrp_out);
418}
419
420static inline void update_cgrp_time_from_event(struct perf_event *event)
421{
422 struct perf_cgroup *cgrp;
423
424
425
426
427
428 if (!is_cgroup_event(event))
429 return;
430
431 cgrp = perf_cgroup_from_task(current);
432
433
434
435 if (cgrp == event->cgrp)
436 __update_cgrp_time(event->cgrp);
437}
438
439static inline void
440perf_cgroup_set_timestamp(struct task_struct *task,
441 struct perf_event_context *ctx)
442{
443 struct perf_cgroup *cgrp;
444 struct perf_cgroup_info *info;
445
446
447
448
449
450
451 if (!task || !ctx->nr_cgroups)
452 return;
453
454 cgrp = perf_cgroup_from_task(task);
455 info = this_cpu_ptr(cgrp->info);
456 info->timestamp = ctx->timestamp;
457}
458
459#define PERF_CGROUP_SWOUT 0x1
460#define PERF_CGROUP_SWIN 0x2
461
/*
 * reschedule events based on the cgroup constraint of task.
 *
 * mode SWOUT : schedule out everything
 * mode SWIN  : schedule in based on cgroup for next
 */
468void perf_cgroup_switch(struct task_struct *task, int mode)
469{
470 struct perf_cpu_context *cpuctx;
471 struct pmu *pmu;
472 unsigned long flags;
473
474
475
476
477
478
479 local_irq_save(flags);
480
481
482
483
484
485 rcu_read_lock();
486
487 list_for_each_entry_rcu(pmu, &pmus, entry) {
488 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
489 if (cpuctx->unique_pmu != pmu)
490 continue;
491
492
493
494
495
496
497
498
499 if (cpuctx->ctx.nr_cgroups > 0) {
500 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
501 perf_pmu_disable(cpuctx->ctx.pmu);
502
503 if (mode & PERF_CGROUP_SWOUT) {
504 cpu_ctx_sched_out(cpuctx, EVENT_ALL);
505
506
507
508
509 cpuctx->cgrp = NULL;
510 }
511
512 if (mode & PERF_CGROUP_SWIN) {
513 WARN_ON_ONCE(cpuctx->cgrp);
514
515
516
517
518
519 cpuctx->cgrp = perf_cgroup_from_task(task);
520 cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
521 }
522 perf_pmu_enable(cpuctx->ctx.pmu);
523 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
524 }
525 }
526
527 rcu_read_unlock();
528
529 local_irq_restore(flags);
530}
531
532static inline void perf_cgroup_sched_out(struct task_struct *task,
533 struct task_struct *next)
534{
535 struct perf_cgroup *cgrp1;
536 struct perf_cgroup *cgrp2 = NULL;
537
538
539
540
541 cgrp1 = perf_cgroup_from_task(task);
542
543
544
545
546
547 if (next)
548 cgrp2 = perf_cgroup_from_task(next);
549
550
551
552
553
554
555 if (cgrp1 != cgrp2)
556 perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
557}
558
559static inline void perf_cgroup_sched_in(struct task_struct *prev,
560 struct task_struct *task)
561{
562 struct perf_cgroup *cgrp1;
563 struct perf_cgroup *cgrp2 = NULL;
564
565
566
567
568 cgrp1 = perf_cgroup_from_task(task);
569
570
571 cgrp2 = perf_cgroup_from_task(prev);
572
573
574
575
576
577
578 if (cgrp1 != cgrp2)
579 perf_cgroup_switch(task, PERF_CGROUP_SWIN);
580}
581
582static inline int perf_cgroup_connect(int fd, struct perf_event *event,
583 struct perf_event_attr *attr,
584 struct perf_event *group_leader)
585{
586 struct perf_cgroup *cgrp;
587 struct cgroup_subsys_state *css;
588 struct fd f = fdget(fd);
589 int ret = 0;
590
591 if (!f.file)
592 return -EBADF;
593
594 css = cgroup_css_from_dir(f.file, perf_subsys_id);
595 if (IS_ERR(css)) {
596 ret = PTR_ERR(css);
597 goto out;
598 }
599
600 cgrp = container_of(css, struct perf_cgroup, css);
601 event->cgrp = cgrp;
602
603
604 if (!perf_tryget_cgroup(event)) {
605 event->cgrp = NULL;
606 ret = -ENOENT;
607 goto out;
608 }
609
610
611
612
613
614
615 if (group_leader && group_leader->cgrp != cgrp) {
616 perf_detach_cgroup(event);
617 ret = -EINVAL;
618 }
619out:
620 fdput(f);
621 return ret;
622}
623
624static inline void
625perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
626{
627 struct perf_cgroup_info *t;
628 t = per_cpu_ptr(event->cgrp->info, event->cpu);
629 event->shadow_ctx_time = now - t->timestamp;
630}
631
632static inline void
633perf_cgroup_defer_enabled(struct perf_event *event)
634{
635
636
637
638
639
640
641 if (is_cgroup_event(event) && !perf_cgroup_match(event))
642 event->cgrp_defer_enabled = 1;
643}
644
645static inline void
646perf_cgroup_mark_enabled(struct perf_event *event,
647 struct perf_event_context *ctx)
648{
649 struct perf_event *sub;
650 u64 tstamp = perf_event_time(event);
651
652 if (!event->cgrp_defer_enabled)
653 return;
654
655 event->cgrp_defer_enabled = 0;
656
657 event->tstamp_enabled = tstamp - event->total_time_enabled;
658 list_for_each_entry(sub, &event->sibling_list, group_entry) {
659 if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
660 sub->tstamp_enabled = tstamp - sub->total_time_enabled;
661 sub->cgrp_defer_enabled = 0;
662 }
663 }
664}
665#else
666
667static inline bool
668perf_cgroup_match(struct perf_event *event)
669{
670 return true;
671}
672
673static inline void perf_detach_cgroup(struct perf_event *event)
674{}
675
676static inline int is_cgroup_event(struct perf_event *event)
677{
678 return 0;
679}
680
681static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
682{
683 return 0;
684}
685
686static inline void update_cgrp_time_from_event(struct perf_event *event)
687{
688}
689
690static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
691{
692}
693
694static inline void perf_cgroup_sched_out(struct task_struct *task,
695 struct task_struct *next)
696{
697}
698
699static inline void perf_cgroup_sched_in(struct task_struct *prev,
700 struct task_struct *task)
701{
702}
703
704static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
705 struct perf_event_attr *attr,
706 struct perf_event *group_leader)
707{
708 return -EINVAL;
709}
710
711static inline void
712perf_cgroup_set_timestamp(struct task_struct *task,
713 struct perf_event_context *ctx)
714{
715}
716
717void
718perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
719{
720}
721
722static inline void
723perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
724{
725}
726
727static inline u64 perf_cgroup_event_time(struct perf_event *event)
728{
729 return 0;
730}
731
732static inline void
733perf_cgroup_defer_enabled(struct perf_event *event)
734{
735}
736
737static inline void
738perf_cgroup_mark_enabled(struct perf_event *event,
739 struct perf_event_context *ctx)
740{
741}
742#endif
743
744
745
746
747
748#define PERF_CPU_HRTIMER (1000 / HZ)
749
750
751
752static enum hrtimer_restart perf_cpu_hrtimer_handler(struct hrtimer *hr)
753{
754 struct perf_cpu_context *cpuctx;
755 enum hrtimer_restart ret = HRTIMER_NORESTART;
756 int rotations = 0;
757
758 WARN_ON(!irqs_disabled());
759
760 cpuctx = container_of(hr, struct perf_cpu_context, hrtimer);
761
762 rotations = perf_rotate_context(cpuctx);
763
764
765
766
767 if (rotations) {
768 hrtimer_forward_now(hr, cpuctx->hrtimer_interval);
769 ret = HRTIMER_RESTART;
770 }
771
772 return ret;
773}
774
775
776void perf_cpu_hrtimer_cancel(int cpu)
777{
778 struct perf_cpu_context *cpuctx;
779 struct pmu *pmu;
780 unsigned long flags;
781
782 if (WARN_ON(cpu != smp_processor_id()))
783 return;
784
785 local_irq_save(flags);
786
787 rcu_read_lock();
788
789 list_for_each_entry_rcu(pmu, &pmus, entry) {
790 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
791
792 if (pmu->task_ctx_nr == perf_sw_context)
793 continue;
794
795 hrtimer_cancel(&cpuctx->hrtimer);
796 }
797
798 rcu_read_unlock();
799
800 local_irq_restore(flags);
801}
802
803static void __perf_cpu_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
804{
805 struct hrtimer *hr = &cpuctx->hrtimer;
806 struct pmu *pmu = cpuctx->ctx.pmu;
807 int timer;
808
809
810 if (pmu->task_ctx_nr == perf_sw_context)
811 return;
812
813
814
815
816
817 timer = pmu->hrtimer_interval_ms;
818 if (timer < 1)
819 timer = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;
820
821 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
822
823 hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
824 hr->function = perf_cpu_hrtimer_handler;
825}
826
827static void perf_cpu_hrtimer_restart(struct perf_cpu_context *cpuctx)
828{
829 struct hrtimer *hr = &cpuctx->hrtimer;
830 struct pmu *pmu = cpuctx->ctx.pmu;
831
832
833 if (pmu->task_ctx_nr == perf_sw_context)
834 return;
835
836 if (hrtimer_active(hr))
837 return;
838
839 if (!hrtimer_callback_running(hr))
840 __hrtimer_start_range_ns(hr, cpuctx->hrtimer_interval,
841 0, HRTIMER_MODE_REL_PINNED, 0);
842}
843
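/*
 * PMU disable/enable calls nest: the PMU is actually disabled only on the
 * 0 -> 1 transition of the per-cpu disable count and re-enabled on the
 * 1 -> 0 transition.
 */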
844void perf_pmu_disable(struct pmu *pmu)
845{
846 int *count = this_cpu_ptr(pmu->pmu_disable_count);
847 if (!(*count)++)
848 pmu->pmu_disable(pmu);
849}
850
851void perf_pmu_enable(struct pmu *pmu)
852{
853 int *count = this_cpu_ptr(pmu->pmu_disable_count);
854 if (!--(*count))
855 pmu->pmu_enable(pmu);
856}
857
858static DEFINE_PER_CPU(struct list_head, rotation_list);
859
860
861
862
863
864
865static void perf_pmu_rotate_start(struct pmu *pmu)
866{
867 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
868 struct list_head *head = &__get_cpu_var(rotation_list);
869
870 WARN_ON(!irqs_disabled());
871
872 if (list_empty(&cpuctx->rotation_list)) {
873 int was_empty = list_empty(head);
874 list_add(&cpuctx->rotation_list, head);
875 if (was_empty)
876 tick_nohz_full_kick();
877 }
878}
879
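/*
 * Context reference counting: get_ctx()/put_ctx() pair up; dropping the
 * last reference releases the parent context, the task reference and
 * finally the context itself via RCU.
 */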
880static void get_ctx(struct perf_event_context *ctx)
881{
882 WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
883}
884
885static void put_ctx(struct perf_event_context *ctx)
886{
887 if (atomic_dec_and_test(&ctx->refcount)) {
888 if (ctx->parent_ctx)
889 put_ctx(ctx->parent_ctx);
890 if (ctx->task)
891 put_task_struct(ctx->task);
892 kfree_rcu(ctx, rcu_head);
893 }
894}
895
896static void unclone_ctx(struct perf_event_context *ctx)
897{
898 if (ctx->parent_ctx) {
899 put_ctx(ctx->parent_ctx);
900 ctx->parent_ctx = NULL;
901 }
902}
903
904static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
905{
906
907
908
909 if (event->parent)
910 event = event->parent;
911
912 return task_tgid_nr_ns(p, event->ns);
913}
914
915static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
916{
917
918
919
920 if (event->parent)
921 event = event->parent;
922
923 return task_pid_nr_ns(p, event->ns);
924}
925
/*
 * If we inherit events we want to return the parent event id
 * to userspace.
 */
930static u64 primary_event_id(struct perf_event *event)
931{
932 u64 id = event->id;
933
934 if (event->parent)
935 id = event->parent->id;
936
937 return id;
938}
939
/*
 * Get the perf_event_context for a task and lock it.
 * This has to cope with the fact that until it is locked,
 * the context could get moved to another task.
 */
945static struct perf_event_context *
946perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
947{
948 struct perf_event_context *ctx;
949
950retry:
951
952
953
954
955
956
957
958
959
960 preempt_disable();
961 rcu_read_lock();
962 ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
963 if (ctx) {
964
965
966
967
968
969
970
971
972
973
974 raw_spin_lock_irqsave(&ctx->lock, *flags);
975 if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
976 raw_spin_unlock_irqrestore(&ctx->lock, *flags);
977 rcu_read_unlock();
978 preempt_enable();
979 goto retry;
980 }
981
982 if (!atomic_inc_not_zero(&ctx->refcount)) {
983 raw_spin_unlock_irqrestore(&ctx->lock, *flags);
984 ctx = NULL;
985 }
986 }
987 rcu_read_unlock();
988 preempt_enable();
989 return ctx;
990}
991
/*
 * Get the context for a task and increment its pin_count so it
 * can't get swapped to another task.  This also increments its
 * reference count so that the context can't get freed.
 */
997static struct perf_event_context *
998perf_pin_task_context(struct task_struct *task, int ctxn)
999{
1000 struct perf_event_context *ctx;
1001 unsigned long flags;
1002
1003 ctx = perf_lock_task_context(task, ctxn, &flags);
1004 if (ctx) {
1005 ++ctx->pin_count;
1006 raw_spin_unlock_irqrestore(&ctx->lock, flags);
1007 }
1008 return ctx;
1009}
1010
1011static void perf_unpin_context(struct perf_event_context *ctx)
1012{
1013 unsigned long flags;
1014
1015 raw_spin_lock_irqsave(&ctx->lock, flags);
1016 --ctx->pin_count;
1017 raw_spin_unlock_irqrestore(&ctx->lock, flags);
1018}
1019
/*
 * Update the record of the current time in a context.
 */
1023static void update_context_time(struct perf_event_context *ctx)
1024{
1025 u64 now = perf_clock();
1026
1027 ctx->time += now - ctx->timestamp;
1028 ctx->timestamp = now;
1029}
1030
1031static u64 perf_event_time(struct perf_event *event)
1032{
1033 struct perf_event_context *ctx = event->ctx;
1034
1035 if (is_cgroup_event(event))
1036 return perf_cgroup_event_time(event);
1037
1038 return ctx ? ctx->time : 0;
1039}
1040
/*
 * Update the total_time_enabled and total_time_running fields for an event.
 * The caller of this function needs to hold the ctx->lock.
 */
1045static void update_event_times(struct perf_event *event)
1046{
1047 struct perf_event_context *ctx = event->ctx;
1048 u64 run_end;
1049
1050 if (event->state < PERF_EVENT_STATE_INACTIVE ||
1051 event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
1052 return;
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063 if (is_cgroup_event(event))
1064 run_end = perf_cgroup_event_time(event);
1065 else if (ctx->is_active)
1066 run_end = ctx->time;
1067 else
1068 run_end = event->tstamp_stopped;
1069
1070 event->total_time_enabled = run_end - event->tstamp_enabled;
1071
1072 if (event->state == PERF_EVENT_STATE_INACTIVE)
1073 run_end = event->tstamp_stopped;
1074 else
1075 run_end = perf_event_time(event);
1076
1077 event->total_time_running = run_end - event->tstamp_running;
1078
1079}
1080
/*
 * Update total_time_enabled and total_time_running for all events in a group.
 */
1084static void update_group_times(struct perf_event *leader)
1085{
1086 struct perf_event *event;
1087
1088 update_event_times(leader);
1089 list_for_each_entry(event, &leader->sibling_list, group_entry)
1090 update_event_times(event);
1091}
1092
1093static struct list_head *
1094ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
1095{
1096 if (event->attr.pinned)
1097 return &ctx->pinned_groups;
1098 else
1099 return &ctx->flexible_groups;
1100}
1101
/*
 * Add an event to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
1106static void
1107list_add_event(struct perf_event *event, struct perf_event_context *ctx)
1108{
1109 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
1110 event->attach_state |= PERF_ATTACH_CONTEXT;
1111
1112
1113
1114
1115
1116
1117 if (event->group_leader == event) {
1118 struct list_head *list;
1119
1120 if (is_software_event(event))
1121 event->group_flags |= PERF_GROUP_SOFTWARE;
1122
1123 list = ctx_group_list(event, ctx);
1124 list_add_tail(&event->group_entry, list);
1125 }
1126
1127 if (is_cgroup_event(event))
1128 ctx->nr_cgroups++;
1129
1130 if (has_branch_stack(event))
1131 ctx->nr_branch_stack++;
1132
1133 list_add_rcu(&event->event_entry, &ctx->event_list);
1134 if (!ctx->nr_events)
1135 perf_pmu_rotate_start(ctx->pmu);
1136 ctx->nr_events++;
1137 if (event->attr.inherit_stat)
1138 ctx->nr_stat++;
1139}
1140
/*
 * Initialize event state based on the perf_event_attr::disabled.
 */
1144static inline void perf_event__state_init(struct perf_event *event)
1145{
1146 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
1147 PERF_EVENT_STATE_INACTIVE;
1148}
1149
1150
1151
1152
1153
1154static void perf_event__read_size(struct perf_event *event)
1155{
1156 int entry = sizeof(u64);
1157 int size = 0;
1158 int nr = 1;
1159
1160 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1161 size += sizeof(u64);
1162
1163 if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1164 size += sizeof(u64);
1165
1166 if (event->attr.read_format & PERF_FORMAT_ID)
1167 entry += sizeof(u64);
1168
1169 if (event->attr.read_format & PERF_FORMAT_GROUP) {
1170 nr += event->group_leader->nr_siblings;
1171 size += sizeof(u64);
1172 }
1173
1174 size += entry * nr;
1175 event->read_size = size;
1176}
1177
1178static void perf_event__header_size(struct perf_event *event)
1179{
1180 struct perf_sample_data *data;
1181 u64 sample_type = event->attr.sample_type;
1182 u16 size = 0;
1183
1184 perf_event__read_size(event);
1185
1186 if (sample_type & PERF_SAMPLE_IP)
1187 size += sizeof(data->ip);
1188
1189 if (sample_type & PERF_SAMPLE_ADDR)
1190 size += sizeof(data->addr);
1191
1192 if (sample_type & PERF_SAMPLE_PERIOD)
1193 size += sizeof(data->period);
1194
1195 if (sample_type & PERF_SAMPLE_WEIGHT)
1196 size += sizeof(data->weight);
1197
1198 if (sample_type & PERF_SAMPLE_READ)
1199 size += event->read_size;
1200
1201 if (sample_type & PERF_SAMPLE_DATA_SRC)
1202 size += sizeof(data->data_src.val);
1203
1204 event->header_size = size;
1205}
1206
1207static void perf_event__id_header_size(struct perf_event *event)
1208{
1209 struct perf_sample_data *data;
1210 u64 sample_type = event->attr.sample_type;
1211 u16 size = 0;
1212
1213 if (sample_type & PERF_SAMPLE_TID)
1214 size += sizeof(data->tid_entry);
1215
1216 if (sample_type & PERF_SAMPLE_TIME)
1217 size += sizeof(data->time);
1218
1219 if (sample_type & PERF_SAMPLE_ID)
1220 size += sizeof(data->id);
1221
1222 if (sample_type & PERF_SAMPLE_STREAM_ID)
1223 size += sizeof(data->stream_id);
1224
1225 if (sample_type & PERF_SAMPLE_CPU)
1226 size += sizeof(data->cpu_entry);
1227
1228 event->id_header_size = size;
1229}
1230
1231static void perf_group_attach(struct perf_event *event)
1232{
1233 struct perf_event *group_leader = event->group_leader, *pos;
1234
1235
1236
1237
1238 if (event->attach_state & PERF_ATTACH_GROUP)
1239 return;
1240
1241 event->attach_state |= PERF_ATTACH_GROUP;
1242
1243 if (group_leader == event)
1244 return;
1245
1246 if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
1247 !is_software_event(event))
1248 group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;
1249
1250 list_add_tail(&event->group_entry, &group_leader->sibling_list);
1251 group_leader->nr_siblings++;
1252
1253 perf_event__header_size(group_leader);
1254
1255 list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
1256 perf_event__header_size(pos);
1257}
1258
/*
 * Remove an event from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
1263static void
1264list_del_event(struct perf_event *event, struct perf_event_context *ctx)
1265{
1266 struct perf_cpu_context *cpuctx;
1267
1268
1269
1270 if (!(event->attach_state & PERF_ATTACH_CONTEXT))
1271 return;
1272
1273 event->attach_state &= ~PERF_ATTACH_CONTEXT;
1274
1275 if (is_cgroup_event(event)) {
1276 ctx->nr_cgroups--;
1277 cpuctx = __get_cpu_context(ctx);
1278
1279
1280
1281
1282
1283 if (!ctx->nr_cgroups)
1284 cpuctx->cgrp = NULL;
1285 }
1286
1287 if (has_branch_stack(event))
1288 ctx->nr_branch_stack--;
1289
1290 ctx->nr_events--;
1291 if (event->attr.inherit_stat)
1292 ctx->nr_stat--;
1293
1294 list_del_rcu(&event->event_entry);
1295
1296 if (event->group_leader == event)
1297 list_del_init(&event->group_entry);
1298
1299 update_group_times(event);
1300
1301
1302
1303
1304
1305
1306
1307
1308 if (event->state > PERF_EVENT_STATE_OFF)
1309 event->state = PERF_EVENT_STATE_OFF;
1310}
1311
1312static void perf_group_detach(struct perf_event *event)
1313{
1314 struct perf_event *sibling, *tmp;
1315 struct list_head *list = NULL;
1316
1317
1318
1319
1320 if (!(event->attach_state & PERF_ATTACH_GROUP))
1321 return;
1322
1323 event->attach_state &= ~PERF_ATTACH_GROUP;
1324
1325
1326
1327
1328 if (event->group_leader != event) {
1329 list_del_init(&event->group_entry);
1330 event->group_leader->nr_siblings--;
1331 goto out;
1332 }
1333
1334 if (!list_empty(&event->group_entry))
1335 list = &event->group_entry;
1336
1337
1338
1339
1340
1341
1342 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
1343 if (list)
1344 list_move_tail(&sibling->group_entry, list);
1345 sibling->group_leader = sibling;
1346
1347
1348 sibling->group_flags = event->group_flags;
1349 }
1350
1351out:
1352 perf_event__header_size(event->group_leader);
1353
1354 list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
1355 perf_event__header_size(tmp);
1356}
1357
1358static inline int
1359event_filter_match(struct perf_event *event)
1360{
1361 return (event->cpu == -1 || event->cpu == smp_processor_id())
1362 && perf_cgroup_match(event);
1363}
1364
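/*
 * Take an event off the PMU: update its time accounting, mark it
 * INACTIVE (or OFF when a disable was pending) and drop the context's
 * active counts.
 */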
1365static void
1366event_sched_out(struct perf_event *event,
1367 struct perf_cpu_context *cpuctx,
1368 struct perf_event_context *ctx)
1369{
1370 u64 tstamp = perf_event_time(event);
1371 u64 delta;
1372
1373
1374
1375
1376
1377
1378 if (event->state == PERF_EVENT_STATE_INACTIVE
1379 && !event_filter_match(event)) {
1380 delta = tstamp - event->tstamp_stopped;
1381 event->tstamp_running += delta;
1382 event->tstamp_stopped = tstamp;
1383 }
1384
1385 if (event->state != PERF_EVENT_STATE_ACTIVE)
1386 return;
1387
1388 event->state = PERF_EVENT_STATE_INACTIVE;
1389 if (event->pending_disable) {
1390 event->pending_disable = 0;
1391 event->state = PERF_EVENT_STATE_OFF;
1392 }
1393 event->tstamp_stopped = tstamp;
1394 event->pmu->del(event, 0);
1395 event->oncpu = -1;
1396
1397 if (!is_software_event(event))
1398 cpuctx->active_oncpu--;
1399 ctx->nr_active--;
1400 if (event->attr.freq && event->attr.sample_freq)
1401 ctx->nr_freq--;
1402 if (event->attr.exclusive || !cpuctx->active_oncpu)
1403 cpuctx->exclusive = 0;
1404}
1405
1406static void
1407group_sched_out(struct perf_event *group_event,
1408 struct perf_cpu_context *cpuctx,
1409 struct perf_event_context *ctx)
1410{
1411 struct perf_event *event;
1412 int state = group_event->state;
1413
1414 event_sched_out(group_event, cpuctx, ctx);
1415
1416
1417
1418
1419 list_for_each_entry(event, &group_event->sibling_list, group_entry)
1420 event_sched_out(event, cpuctx, ctx);
1421
1422 if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
1423 cpuctx->exclusive = 0;
1424}
1425
/*
 * Cross CPU call to remove a performance event.
 *
 * We disable the event on the hardware level first. After that we
 * remove it from the context list.
 */
1432static int __perf_remove_from_context(void *info)
1433{
1434 struct perf_event *event = info;
1435 struct perf_event_context *ctx = event->ctx;
1436 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1437
1438 raw_spin_lock(&ctx->lock);
1439 event_sched_out(event, cpuctx, ctx);
1440 list_del_event(event, ctx);
1441 if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
1442 ctx->is_active = 0;
1443 cpuctx->task_ctx = NULL;
1444 }
1445 raw_spin_unlock(&ctx->lock);
1446
1447 return 0;
1448}
1449
/*
 * Remove the event from a task's (or a CPU's) list of events.
 *
 * CPU events are removed with a smp call. For task events we only
 * call when the task is on a CPU.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_event_{en,dis}able_family.
 */
1464static void perf_remove_from_context(struct perf_event *event)
1465{
1466 struct perf_event_context *ctx = event->ctx;
1467 struct task_struct *task = ctx->task;
1468
1469 lockdep_assert_held(&ctx->mutex);
1470
1471 if (!task) {
1472
1473
1474
1475
1476 cpu_function_call(event->cpu, __perf_remove_from_context, event);
1477 return;
1478 }
1479
1480retry:
1481 if (!task_function_call(task, __perf_remove_from_context, event))
1482 return;
1483
1484 raw_spin_lock_irq(&ctx->lock);
1485
1486
1487
1488
1489 if (ctx->is_active) {
1490 raw_spin_unlock_irq(&ctx->lock);
1491 goto retry;
1492 }
1493
1494
1495
1496
1497
1498 list_del_event(event, ctx);
1499 raw_spin_unlock_irq(&ctx->lock);
1500}
1501
/*
 * Cross CPU call to disable a performance event.
 */
1505int __perf_event_disable(void *info)
1506{
1507 struct perf_event *event = info;
1508 struct perf_event_context *ctx = event->ctx;
1509 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1510
1511
1512
1513
1514
1515
1516
1517
1518 if (ctx->task && cpuctx->task_ctx != ctx)
1519 return -EINVAL;
1520
1521 raw_spin_lock(&ctx->lock);
1522
1523
1524
1525
1526
1527 if (event->state >= PERF_EVENT_STATE_INACTIVE) {
1528 update_context_time(ctx);
1529 update_cgrp_time_from_event(event);
1530 update_group_times(event);
1531 if (event == event->group_leader)
1532 group_sched_out(event, cpuctx, ctx);
1533 else
1534 event_sched_out(event, cpuctx, ctx);
1535 event->state = PERF_EVENT_STATE_OFF;
1536 }
1537
1538 raw_spin_unlock(&ctx->lock);
1539
1540 return 0;
1541}
1542
/*
 * Disable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_event_{en,dis}able_family.
 *
 * When called from perf_pending_event it's OK because event->ctx
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_event_task_sched_out for this context.
 */
1556void perf_event_disable(struct perf_event *event)
1557{
1558 struct perf_event_context *ctx = event->ctx;
1559 struct task_struct *task = ctx->task;
1560
1561 if (!task) {
1562
1563
1564
1565 cpu_function_call(event->cpu, __perf_event_disable, event);
1566 return;
1567 }
1568
1569retry:
1570 if (!task_function_call(task, __perf_event_disable, event))
1571 return;
1572
1573 raw_spin_lock_irq(&ctx->lock);
1574
1575
1576
1577 if (event->state == PERF_EVENT_STATE_ACTIVE) {
1578 raw_spin_unlock_irq(&ctx->lock);
1579
1580
1581
1582
1583 task = ctx->task;
1584 goto retry;
1585 }
1586
1587
1588
1589
1590
1591 if (event->state == PERF_EVENT_STATE_INACTIVE) {
1592 update_group_times(event);
1593 event->state = PERF_EVENT_STATE_OFF;
1594 }
1595 raw_spin_unlock_irq(&ctx->lock);
1596}
1597EXPORT_SYMBOL_GPL(perf_event_disable);
1598
1599static void perf_set_shadow_time(struct perf_event *event,
1600 struct perf_event_context *ctx,
1601 u64 tstamp)
1602{
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628 if (is_cgroup_event(event))
1629 perf_cgroup_set_shadow_time(event, tstamp);
1630 else
1631 event->shadow_ctx_time = tstamp - ctx->timestamp;
1632}
1633
1634#define MAX_INTERRUPTS (~0ULL)
1635
1636static void perf_log_throttle(struct perf_event *event, int enable);
1637
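/*
 * Put an event on the PMU. Returns -EAGAIN when the PMU has no room
 * left, in which case the caller has to back out the whole group.
 */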
1638static int
1639event_sched_in(struct perf_event *event,
1640 struct perf_cpu_context *cpuctx,
1641 struct perf_event_context *ctx)
1642{
1643 u64 tstamp = perf_event_time(event);
1644
1645 if (event->state <= PERF_EVENT_STATE_OFF)
1646 return 0;
1647
1648 event->state = PERF_EVENT_STATE_ACTIVE;
1649 event->oncpu = smp_processor_id();
1650
1651
1652
1653
1654
1655
1656 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
1657 perf_log_throttle(event, 1);
1658 event->hw.interrupts = 0;
1659 }
1660
1661
1662
1663
1664 smp_wmb();
1665
1666 if (event->pmu->add(event, PERF_EF_START)) {
1667 event->state = PERF_EVENT_STATE_INACTIVE;
1668 event->oncpu = -1;
1669 return -EAGAIN;
1670 }
1671
1672 event->tstamp_running += tstamp - event->tstamp_stopped;
1673
1674 perf_set_shadow_time(event, ctx, tstamp);
1675
1676 if (!is_software_event(event))
1677 cpuctx->active_oncpu++;
1678 ctx->nr_active++;
1679 if (event->attr.freq && event->attr.sample_freq)
1680 ctx->nr_freq++;
1681
1682 if (event->attr.exclusive)
1683 cpuctx->exclusive = 1;
1684
1685 return 0;
1686}
1687
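/*
 * Schedule a whole group as one PMU transaction: start_txn(), add the
 * leader and all siblings, then commit_txn(). On failure the partially
 * scheduled events are backed out and the transaction is cancelled.
 */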
1688static int
1689group_sched_in(struct perf_event *group_event,
1690 struct perf_cpu_context *cpuctx,
1691 struct perf_event_context *ctx)
1692{
1693 struct perf_event *event, *partial_group = NULL;
1694 struct pmu *pmu = group_event->pmu;
1695 u64 now = ctx->time;
1696 bool simulate = false;
1697
1698 if (group_event->state == PERF_EVENT_STATE_OFF)
1699 return 0;
1700
1701 pmu->start_txn(pmu);
1702
1703 if (event_sched_in(group_event, cpuctx, ctx)) {
1704 pmu->cancel_txn(pmu);
1705 perf_cpu_hrtimer_restart(cpuctx);
1706 return -EAGAIN;
1707 }
1708
1709
1710
1711
1712 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
1713 if (event_sched_in(event, cpuctx, ctx)) {
1714 partial_group = event;
1715 goto group_error;
1716 }
1717 }
1718
1719 if (!pmu->commit_txn(pmu))
1720 return 0;
1721
1722group_error:
1723
1724
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734
1735
1736
1737 list_for_each_entry(event, &group_event->sibling_list, group_entry) {
1738 if (event == partial_group)
1739 simulate = true;
1740
1741 if (simulate) {
1742 event->tstamp_running += now - event->tstamp_stopped;
1743 event->tstamp_stopped = now;
1744 } else {
1745 event_sched_out(event, cpuctx, ctx);
1746 }
1747 }
1748 event_sched_out(group_event, cpuctx, ctx);
1749
1750 pmu->cancel_txn(pmu);
1751
1752 perf_cpu_hrtimer_restart(cpuctx);
1753
1754 return -EAGAIN;
1755}
1756
1757
1758
1759
1760static int group_can_go_on(struct perf_event *event,
1761 struct perf_cpu_context *cpuctx,
1762 int can_add_hw)
1763{
1764
1765
1766
1767 if (event->group_flags & PERF_GROUP_SOFTWARE)
1768 return 1;
1769
1770
1771
1772
1773 if (cpuctx->exclusive)
1774 return 0;
1775
1776
1777
1778
1779 if (event->attr.exclusive && cpuctx->active_oncpu)
1780 return 0;
1781
1782
1783
1784
1785 return can_add_hw;
1786}
1787
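/*
 * Add the event to its context's lists and initialise its timestamps to
 * the current context time.
 */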
1788static void add_event_to_ctx(struct perf_event *event,
1789 struct perf_event_context *ctx)
1790{
1791 u64 tstamp = perf_event_time(event);
1792
1793 list_add_event(event, ctx);
1794 perf_group_attach(event);
1795 event->tstamp_enabled = tstamp;
1796 event->tstamp_running = tstamp;
1797 event->tstamp_stopped = tstamp;
1798}
1799
1800static void task_ctx_sched_out(struct perf_event_context *ctx);
1801static void
1802ctx_sched_in(struct perf_event_context *ctx,
1803 struct perf_cpu_context *cpuctx,
1804 enum event_type_t event_type,
1805 struct task_struct *task);
1806
1807static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
1808 struct perf_event_context *ctx,
1809 struct task_struct *task)
1810{
1811 cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
1812 if (ctx)
1813 ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
1814 cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
1815 if (ctx)
1816 ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
1817}
1818
/*
 * Cross CPU call to install and enable a performance event.
 *
 * Must be called with ctx->mutex held.
 */
1824static int __perf_install_in_context(void *info)
1825{
1826 struct perf_event *event = info;
1827 struct perf_event_context *ctx = event->ctx;
1828 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1829 struct perf_event_context *task_ctx = cpuctx->task_ctx;
1830 struct task_struct *task = current;
1831
1832 perf_ctx_lock(cpuctx, task_ctx);
1833 perf_pmu_disable(cpuctx->ctx.pmu);
1834
1835
1836
1837
1838 if (task_ctx)
1839 task_ctx_sched_out(task_ctx);
1840
1841
1842
1843
1844
1845 if (ctx->task && task_ctx != ctx) {
1846 if (task_ctx)
1847 raw_spin_unlock(&task_ctx->lock);
1848 raw_spin_lock(&ctx->lock);
1849 task_ctx = ctx;
1850 }
1851
1852 if (task_ctx) {
1853 cpuctx->task_ctx = task_ctx;
1854 task = task_ctx->task;
1855 }
1856
1857 cpu_ctx_sched_out(cpuctx, EVENT_ALL);
1858
1859 update_context_time(ctx);
1860
1861
1862
1863
1864
1865 update_cgrp_time_from_event(event);
1866
1867 add_event_to_ctx(event, ctx);
1868
1869
1870
1871
1872 perf_event_sched_in(cpuctx, task_ctx, task);
1873
1874 perf_pmu_enable(cpuctx->ctx.pmu);
1875 perf_ctx_unlock(cpuctx, task_ctx);
1876
1877 return 0;
1878}
1879
/*
 * Attach a performance event to a context.
 *
 * If the event is attached to a task which is on a CPU we use a smp
 * call to enable it in the task context. The task might have been
 * scheduled away, but we check this in the smp call again.
 */
1890static void
1891perf_install_in_context(struct perf_event_context *ctx,
1892 struct perf_event *event,
1893 int cpu)
1894{
1895 struct task_struct *task = ctx->task;
1896
1897 lockdep_assert_held(&ctx->mutex);
1898
1899 event->ctx = ctx;
1900 if (event->cpu != -1)
1901 event->cpu = cpu;
1902
1903 if (!task) {
1904
1905
1906
1907
1908 cpu_function_call(cpu, __perf_install_in_context, event);
1909 return;
1910 }
1911
1912retry:
1913 if (!task_function_call(task, __perf_install_in_context, event))
1914 return;
1915
1916 raw_spin_lock_irq(&ctx->lock);
1917
1918
1919
1920
1921 if (ctx->is_active) {
1922 raw_spin_unlock_irq(&ctx->lock);
1923 goto retry;
1924 }
1925
1926
1927
1928
1929
1930 add_event_to_ctx(event, ctx);
1931 raw_spin_unlock_irq(&ctx->lock);
1932}
1933
/*
 * Put an event into inactive state and update time fields.
 * Enabling the leader of a group effectively enables all
 * the group members that aren't explicitly disabled, so we
 * have to update their ->tstamp_enabled also.
 * Note: this works for group members as well as group leaders
 * since the non-leader members' sibling_lists will be empty.
 */
1942static void __perf_event_mark_enabled(struct perf_event *event)
1943{
1944 struct perf_event *sub;
1945 u64 tstamp = perf_event_time(event);
1946
1947 event->state = PERF_EVENT_STATE_INACTIVE;
1948 event->tstamp_enabled = tstamp - event->total_time_enabled;
1949 list_for_each_entry(sub, &event->sibling_list, group_entry) {
1950 if (sub->state >= PERF_EVENT_STATE_INACTIVE)
1951 sub->tstamp_enabled = tstamp - sub->total_time_enabled;
1952 }
1953}
1954
/*
 * Cross CPU call to enable a performance event.
 */
1958static int __perf_event_enable(void *info)
1959{
1960 struct perf_event *event = info;
1961 struct perf_event_context *ctx = event->ctx;
1962 struct perf_event *leader = event->group_leader;
1963 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1964 int err;
1965
1966
1967
1968
1969
1970
1971
1972
1973
1974
1975 if (!ctx->is_active)
1976 return -EINVAL;
1977
1978 raw_spin_lock(&ctx->lock);
1979 update_context_time(ctx);
1980
1981 if (event->state >= PERF_EVENT_STATE_INACTIVE)
1982 goto unlock;
1983
1984
1985
1986
1987 perf_cgroup_set_timestamp(current, ctx);
1988
1989 __perf_event_mark_enabled(event);
1990
1991 if (!event_filter_match(event)) {
1992 if (is_cgroup_event(event))
1993 perf_cgroup_defer_enabled(event);
1994 goto unlock;
1995 }
1996
1997
1998
1999
2000
2001 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
2002 goto unlock;
2003
2004 if (!group_can_go_on(event, cpuctx, 1)) {
2005 err = -EEXIST;
2006 } else {
2007 if (event == leader)
2008 err = group_sched_in(event, cpuctx, ctx);
2009 else
2010 err = event_sched_in(event, cpuctx, ctx);
2011 }
2012
2013 if (err) {
2014
2015
2016
2017
2018 if (leader != event) {
2019 group_sched_out(leader, cpuctx, ctx);
2020 perf_cpu_hrtimer_restart(cpuctx);
2021 }
2022 if (leader->attr.pinned) {
2023 update_group_times(leader);
2024 leader->state = PERF_EVENT_STATE_ERROR;
2025 }
2026 }
2027
2028unlock:
2029 raw_spin_unlock(&ctx->lock);
2030
2031 return 0;
2032}
2033
/*
 * Enable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_event_{en,dis}able_family.
 */
2043void perf_event_enable(struct perf_event *event)
2044{
2045 struct perf_event_context *ctx = event->ctx;
2046 struct task_struct *task = ctx->task;
2047
2048 if (!task) {
2049
2050
2051
2052 cpu_function_call(event->cpu, __perf_event_enable, event);
2053 return;
2054 }
2055
2056 raw_spin_lock_irq(&ctx->lock);
2057 if (event->state >= PERF_EVENT_STATE_INACTIVE)
2058 goto out;
2059
2060
2061
2062
2063
2064
2065
2066
2067 if (event->state == PERF_EVENT_STATE_ERROR)
2068 event->state = PERF_EVENT_STATE_OFF;
2069
2070retry:
2071 if (!ctx->is_active) {
2072 __perf_event_mark_enabled(event);
2073 goto out;
2074 }
2075
2076 raw_spin_unlock_irq(&ctx->lock);
2077
2078 if (!task_function_call(task, __perf_event_enable, event))
2079 return;
2080
2081 raw_spin_lock_irq(&ctx->lock);
2082
2083
2084
2085
2086
2087 if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) {
2088
2089
2090
2091
2092 task = ctx->task;
2093 goto retry;
2094 }
2095
2096out:
2097 raw_spin_unlock_irq(&ctx->lock);
2098}
2099EXPORT_SYMBOL_GPL(perf_event_enable);
2100
2101int perf_event_refresh(struct perf_event *event, int refresh)
2102{
2103
2104
2105
2106 if (event->attr.inherit || !is_sampling_event(event))
2107 return -EINVAL;
2108
2109 atomic_add(refresh, &event->event_limit);
2110 perf_event_enable(event);
2111
2112 return 0;
2113}
2114EXPORT_SYMBOL_GPL(perf_event_refresh);
2115
2116static void ctx_sched_out(struct perf_event_context *ctx,
2117 struct perf_cpu_context *cpuctx,
2118 enum event_type_t event_type)
2119{
2120 struct perf_event *event;
2121 int is_active = ctx->is_active;
2122
2123 ctx->is_active &= ~event_type;
2124 if (likely(!ctx->nr_events))
2125 return;
2126
2127 update_context_time(ctx);
2128 update_cgrp_time_from_cpuctx(cpuctx);
2129 if (!ctx->nr_active)
2130 return;
2131
2132 perf_pmu_disable(ctx->pmu);
2133 if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) {
2134 list_for_each_entry(event, &ctx->pinned_groups, group_entry)
2135 group_sched_out(event, cpuctx, ctx);
2136 }
2137
2138 if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) {
2139 list_for_each_entry(event, &ctx->flexible_groups, group_entry)
2140 group_sched_out(event, cpuctx, ctx);
2141 }
2142 perf_pmu_enable(ctx->pmu);
2143}
2144
/*
 * Test whether two contexts are equivalent, i.e. whether they have both
 * been cloned from the same version of the same context.
 *
 * If the number of enabled events is the same, then the set of enabled
 * events should be the same, because these are both inherited contexts,
 * therefore we can't access individual events in them directly with an fd;
 * we can only enable/disable all events via prctl, or enable/disable all
 * events in a family via ioctl, which will have the same effect on both
 * contexts.
 */
2156static int context_equiv(struct perf_event_context *ctx1,
2157 struct perf_event_context *ctx2)
2158{
2159 return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
2160 && ctx1->parent_gen == ctx2->parent_gen
2161 && !ctx1->pin_count && !ctx2->pin_count;
2162}
2163
2164static void __perf_event_sync_stat(struct perf_event *event,
2165 struct perf_event *next_event)
2166{
2167 u64 value;
2168
2169 if (!event->attr.inherit_stat)
2170 return;
2171
2172
2173
2174
2175
2176
2177
2178
2179 switch (event->state) {
2180 case PERF_EVENT_STATE_ACTIVE:
2181 event->pmu->read(event);
2182
2183
2184 case PERF_EVENT_STATE_INACTIVE:
2185 update_event_times(event);
2186 break;
2187
2188 default:
2189 break;
2190 }
2191
2192
2193
2194
2195
2196 value = local64_read(&next_event->count);
2197 value = local64_xchg(&event->count, value);
2198 local64_set(&next_event->count, value);
2199
2200 swap(event->total_time_enabled, next_event->total_time_enabled);
2201 swap(event->total_time_running, next_event->total_time_running);
2202
2203
2204
2205
2206 perf_event_update_userpage(event);
2207 perf_event_update_userpage(next_event);
2208}
2209
2210#define list_next_entry(pos, member) \
2211 list_entry(pos->member.next, typeof(*pos), member)
2212
2213static void perf_event_sync_stat(struct perf_event_context *ctx,
2214 struct perf_event_context *next_ctx)
2215{
2216 struct perf_event *event, *next_event;
2217
2218 if (!ctx->nr_stat)
2219 return;
2220
2221 update_context_time(ctx);
2222
2223 event = list_first_entry(&ctx->event_list,
2224 struct perf_event, event_entry);
2225
2226 next_event = list_first_entry(&next_ctx->event_list,
2227 struct perf_event, event_entry);
2228
2229 while (&event->event_entry != &ctx->event_list &&
2230 &next_event->event_entry != &next_ctx->event_list) {
2231
2232 __perf_event_sync_stat(event, next_event);
2233
2234 event = list_next_entry(event, event_entry);
2235 next_event = list_next_entry(next_event, event_entry);
2236 }
2237}
2238
2239static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
2240 struct task_struct *next)
2241{
2242 struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
2243 struct perf_event_context *next_ctx;
2244 struct perf_event_context *parent;
2245 struct perf_cpu_context *cpuctx;
2246 int do_switch = 1;
2247
2248 if (likely(!ctx))
2249 return;
2250
2251 cpuctx = __get_cpu_context(ctx);
2252 if (!cpuctx->task_ctx)
2253 return;
2254
2255 rcu_read_lock();
2256 parent = rcu_dereference(ctx->parent_ctx);
2257 next_ctx = next->perf_event_ctxp[ctxn];
2258 if (parent && next_ctx &&
2259 rcu_dereference(next_ctx->parent_ctx) == parent) {
2260
2261
2262
2263
2264
2265
2266
2267
2268
2269 raw_spin_lock(&ctx->lock);
2270 raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
2271 if (context_equiv(ctx, next_ctx)) {
2272
2273
2274
2275
2276 task->perf_event_ctxp[ctxn] = next_ctx;
2277 next->perf_event_ctxp[ctxn] = ctx;
2278 ctx->task = next;
2279 next_ctx->task = task;
2280 do_switch = 0;
2281
2282 perf_event_sync_stat(ctx, next_ctx);
2283 }
2284 raw_spin_unlock(&next_ctx->lock);
2285 raw_spin_unlock(&ctx->lock);
2286 }
2287 rcu_read_unlock();
2288
2289 if (do_switch) {
2290 raw_spin_lock(&ctx->lock);
2291 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
2292 cpuctx->task_ctx = NULL;
2293 raw_spin_unlock(&ctx->lock);
2294 }
2295}
2296
2297#define for_each_task_context_nr(ctxn) \
2298 for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
2299
/*
 * Called from the scheduler, with interrupts disabled, to schedule out the
 * perf event contexts of the task that is being switched away from.
 *
 * We stop each event and update the event value in event->count. Where
 * the next task has an equivalent (cloned) context, the two contexts are
 * swapped instead of being torn down and rebuilt.
 */
2311void __perf_event_task_sched_out(struct task_struct *task,
2312 struct task_struct *next)
2313{
2314 int ctxn;
2315
2316 for_each_task_context_nr(ctxn)
2317 perf_event_context_sched_out(task, ctxn, next);
2318
2319
2320
2321
2322
2323
2324 if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
2325 perf_cgroup_sched_out(task, next);
2326}
2327
2328static void task_ctx_sched_out(struct perf_event_context *ctx)
2329{
2330 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2331
2332 if (!cpuctx->task_ctx)
2333 return;
2334
2335 if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
2336 return;
2337
2338 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
2339 cpuctx->task_ctx = NULL;
2340}
2341
/*
 * Called with IRQs disabled.
 */
2345static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
2346 enum event_type_t event_type)
2347{
2348 ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
2349}
2350
2351static void
2352ctx_pinned_sched_in(struct perf_event_context *ctx,
2353 struct perf_cpu_context *cpuctx)
2354{
2355 struct perf_event *event;
2356
2357 list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
2358 if (event->state <= PERF_EVENT_STATE_OFF)
2359 continue;
2360 if (!event_filter_match(event))
2361 continue;
2362
2363
2364 if (is_cgroup_event(event))
2365 perf_cgroup_mark_enabled(event, ctx);
2366
2367 if (group_can_go_on(event, cpuctx, 1))
2368 group_sched_in(event, cpuctx, ctx);
2369
2370
2371
2372
2373
2374 if (event->state == PERF_EVENT_STATE_INACTIVE) {
2375 update_group_times(event);
2376 event->state = PERF_EVENT_STATE_ERROR;
2377 }
2378 }
2379}
2380
2381static void
2382ctx_flexible_sched_in(struct perf_event_context *ctx,
2383 struct perf_cpu_context *cpuctx)
2384{
2385 struct perf_event *event;
2386 int can_add_hw = 1;
2387
2388 list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
2389
2390 if (event->state <= PERF_EVENT_STATE_OFF)
2391 continue;
2392
2393
2394
2395
2396 if (!event_filter_match(event))
2397 continue;
2398
2399
2400 if (is_cgroup_event(event))
2401 perf_cgroup_mark_enabled(event, ctx);
2402
2403 if (group_can_go_on(event, cpuctx, can_add_hw)) {
2404 if (group_sched_in(event, cpuctx, ctx))
2405 can_add_hw = 0;
2406 }
2407 }
2408}
2409
2410static void
2411ctx_sched_in(struct perf_event_context *ctx,
2412 struct perf_cpu_context *cpuctx,
2413 enum event_type_t event_type,
2414 struct task_struct *task)
2415{
2416 u64 now;
2417 int is_active = ctx->is_active;
2418
2419 ctx->is_active |= event_type;
2420 if (likely(!ctx->nr_events))
2421 return;
2422
2423 now = perf_clock();
2424 ctx->timestamp = now;
2425 perf_cgroup_set_timestamp(task, ctx);
2426
2427
2428
2429
2430 if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
2431 ctx_pinned_sched_in(ctx, cpuctx);
2432
2433
2434 if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
2435 ctx_flexible_sched_in(ctx, cpuctx);
2436}
2437
2438static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
2439 enum event_type_t event_type,
2440 struct task_struct *task)
2441{
2442 struct perf_event_context *ctx = &cpuctx->ctx;
2443
2444 ctx_sched_in(ctx, cpuctx, event_type, task);
2445}
2446
2447static void perf_event_context_sched_in(struct perf_event_context *ctx,
2448 struct task_struct *task)
2449{
2450 struct perf_cpu_context *cpuctx;
2451
2452 cpuctx = __get_cpu_context(ctx);
2453 if (cpuctx->task_ctx == ctx)
2454 return;
2455
2456 perf_ctx_lock(cpuctx, ctx);
2457 perf_pmu_disable(ctx->pmu);
2458
2459
2460
2461
2462
2463 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2464
2465 if (ctx->nr_events)
2466 cpuctx->task_ctx = ctx;
2467
2468 perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);
2469
2470 perf_pmu_enable(ctx->pmu);
2471 perf_ctx_unlock(cpuctx, ctx);
2472
2473
2474
2475
2476
2477 perf_pmu_rotate_start(ctx->pmu);
2478}
2479
2480
2481
2482
2483
2484
2485
2486
2487
2488
2489
2490
2491
2492
2493
2494
2495
2496static void perf_branch_stack_sched_in(struct task_struct *prev,
2497 struct task_struct *task)
2498{
2499 struct perf_cpu_context *cpuctx;
2500 struct pmu *pmu;
2501 unsigned long flags;
2502
2503
2504 if (prev == task)
2505 return;
2506
2507 local_irq_save(flags);
2508
2509 rcu_read_lock();
2510
2511 list_for_each_entry_rcu(pmu, &pmus, entry) {
2512 cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
2513
2514
2515
2516
2517
2518 if (cpuctx->ctx.nr_branch_stack > 0
2519 && pmu->flush_branch_stack) {
2520
2521 pmu = cpuctx->ctx.pmu;
2522
2523 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
2524
2525 perf_pmu_disable(pmu);
2526
2527 pmu->flush_branch_stack();
2528
2529 perf_pmu_enable(pmu);
2530
2531 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
2532 }
2533 }
2534
2535 rcu_read_unlock();
2536
2537 local_irq_restore(flags);
2538}
2539
/*
 * Called from the scheduler, with interrupts disabled, to schedule in the
 * perf event contexts of the incoming task, and to switch cgroup and
 * branch-stack state when such events exist on this CPU.
 */
2551void __perf_event_task_sched_in(struct task_struct *prev,
2552 struct task_struct *task)
2553{
2554 struct perf_event_context *ctx;
2555 int ctxn;
2556
2557 for_each_task_context_nr(ctxn) {
2558 ctx = task->perf_event_ctxp[ctxn];
2559 if (likely(!ctx))
2560 continue;
2561
2562 perf_event_context_sched_in(ctx, task);
2563 }
2564
2565
2566
2567
2568
2569 if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
2570 perf_cgroup_sched_in(prev, task);
2571
2572
2573 if (atomic_read(&__get_cpu_var(perf_branch_stack_events)))
2574 perf_branch_stack_sched_in(prev, task);
2575}
2576
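/*
 * Compute a new sample period so that, at the observed event rate
 * (count events per nsec nanoseconds), the requested sample_freq is met:
 * roughly period = count * NSEC_PER_SEC / (nsec * sample_freq), with the
 * operands shifted down as needed to avoid 64-bit overflow.
 */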
2577static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
2578{
2579 u64 frequency = event->attr.sample_freq;
2580 u64 sec = NSEC_PER_SEC;
2581 u64 divisor, dividend;
2582
2583 int count_fls, nsec_fls, frequency_fls, sec_fls;
2584
2585 count_fls = fls64(count);
2586 nsec_fls = fls64(nsec);
2587 frequency_fls = fls64(frequency);
2588 sec_fls = 30;
2589
2590
2591
2592
2593
2594
2595
2596
2597
2598
2599
2600
2601
2602
2603
2604#define REDUCE_FLS(a, b) \
2605do { \
2606 if (a##_fls > b##_fls) { \
2607 a >>= 1; \
2608 a##_fls--; \
2609 } else { \
2610 b >>= 1; \
2611 b##_fls--; \
2612 } \
2613} while (0)
2614
2615
2616
2617
2618
2619 while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
2620 REDUCE_FLS(nsec, frequency);
2621 REDUCE_FLS(sec, count);
2622 }
2623
2624 if (count_fls + sec_fls > 64) {
2625 divisor = nsec * frequency;
2626
2627 while (count_fls + sec_fls > 64) {
2628 REDUCE_FLS(count, sec);
2629 divisor >>= 1;
2630 }
2631
2632 dividend = count * sec;
2633 } else {
2634 dividend = count * sec;
2635
2636 while (nsec_fls + frequency_fls > 64) {
2637 REDUCE_FLS(nsec, frequency);
2638 dividend >>= 1;
2639 }
2640
2641 divisor = nsec * frequency;
2642 }
2643
2644 if (!divisor)
2645 return dividend;
2646
2647 return div64_u64(dividend, divisor);
2648}
2649
2650static DEFINE_PER_CPU(int, perf_throttled_count);
2651static DEFINE_PER_CPU(u64, perf_throttled_seq);
2652
2653static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
2654{
2655 struct hw_perf_event *hwc = &event->hw;
2656 s64 period, sample_period;
2657 s64 delta;
2658
2659 period = perf_calculate_period(event, nsec, count);
2660
2661 delta = (s64)(period - hwc->sample_period);
2662 delta = (delta + 7) / 8;
2663
2664 sample_period = hwc->sample_period + delta;
2665
2666 if (!sample_period)
2667 sample_period = 1;
2668
2669 hwc->sample_period = sample_period;
2670
2671 if (local64_read(&hwc->period_left) > 8*sample_period) {
2672 if (disable)
2673 event->pmu->stop(event, PERF_EF_UPDATE);
2674
2675 local64_set(&hwc->period_left, 0);
2676
2677 if (disable)
2678 event->pmu->start(event, PERF_EF_RELOAD);
2679 }
2680}
2681
2682
2683
2684
2685
2686
2687static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
2688 int needs_unthr)
2689{
2690 struct perf_event *event;
2691 struct hw_perf_event *hwc;
2692 u64 now, period = TICK_NSEC;
2693 s64 delta;
2694
2695
2696
2697
2698
2699
2700 if (!(ctx->nr_freq || needs_unthr))
2701 return;
2702
2703 raw_spin_lock(&ctx->lock);
2704 perf_pmu_disable(ctx->pmu);
2705
2706 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
2707 if (event->state != PERF_EVENT_STATE_ACTIVE)
2708 continue;
2709
2710 if (!event_filter_match(event))
2711 continue;
2712
2713 hwc = &event->hw;
2714
2715 if (needs_unthr && hwc->interrupts == MAX_INTERRUPTS) {
2716 hwc->interrupts = 0;
2717 perf_log_throttle(event, 1);
2718 event->pmu->start(event, 0);
2719 }
2720
2721 if (!event->attr.freq || !event->attr.sample_freq)
2722 continue;
2723
2724
2725
2726
2727 event->pmu->stop(event, PERF_EF_UPDATE);
2728
2729 now = local64_read(&event->count);
2730 delta = now - hwc->freq_count_stamp;
2731 hwc->freq_count_stamp = now;
2732
2733
2734
2735
2736
2737
2738
2739
2740 if (delta > 0)
2741 perf_adjust_period(event, period, delta, false);
2742
2743 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
2744 }
2745
2746 perf_pmu_enable(ctx->pmu);
2747 raw_spin_unlock(&ctx->lock);
2748}
2749
/*
 * Round-robin a context's events:
 */
2753static void rotate_ctx(struct perf_event_context *ctx)
2754{
2755
2756
2757
2758
2759 if (!ctx->rotate_disable)
2760 list_rotate_left(&ctx->flexible_groups);
2761}
2762
2763
2764
2765
2766
2767
2768static int perf_rotate_context(struct perf_cpu_context *cpuctx)
2769{
2770 struct perf_event_context *ctx = NULL;
2771 int rotate = 0, remove = 1;
2772
2773 if (cpuctx->ctx.nr_events) {
2774 remove = 0;
2775 if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
2776 rotate = 1;
2777 }
2778
2779 ctx = cpuctx->task_ctx;
2780 if (ctx && ctx->nr_events) {
2781 remove = 0;
2782 if (ctx->nr_events != ctx->nr_active)
2783 rotate = 1;
2784 }
2785
2786 if (!rotate)
2787 goto done;
2788
2789 perf_ctx_lock(cpuctx, cpuctx->task_ctx);
2790 perf_pmu_disable(cpuctx->ctx.pmu);
2791
2792 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2793 if (ctx)
2794 ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
2795
2796 rotate_ctx(&cpuctx->ctx);
2797 if (ctx)
2798 rotate_ctx(ctx);
2799
2800 perf_event_sched_in(cpuctx, ctx, current);
2801
2802 perf_pmu_enable(cpuctx->ctx.pmu);
2803 perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
2804done:
2805 if (remove)
2806 list_del_init(&cpuctx->rotation_list);
2807
2808 return rotate;
2809}
2810
2811#ifdef CONFIG_NO_HZ_FULL
2812bool perf_event_can_stop_tick(void)
2813{
2814 if (list_empty(&__get_cpu_var(rotation_list)))
2815 return true;
2816 else
2817 return false;
2818}
2819#endif
2820
2821void perf_event_task_tick(void)
2822{
2823 struct list_head *head = &__get_cpu_var(rotation_list);
2824 struct perf_cpu_context *cpuctx, *tmp;
2825 struct perf_event_context *ctx;
2826 int throttled;
2827
2828 WARN_ON(!irqs_disabled());
2829
2830 __this_cpu_inc(perf_throttled_seq);
2831 throttled = __this_cpu_xchg(perf_throttled_count, 0);
2832
2833 list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
2834 ctx = &cpuctx->ctx;
2835 perf_adjust_freq_unthr_context(ctx, throttled);
2836
2837 ctx = cpuctx->task_ctx;
2838 if (ctx)
2839 perf_adjust_freq_unthr_context(ctx, throttled);
2840 }
2841}
2842
2843static int event_enable_on_exec(struct perf_event *event,
2844 struct perf_event_context *ctx)
2845{
2846 if (!event->attr.enable_on_exec)
2847 return 0;
2848
2849 event->attr.enable_on_exec = 0;
2850 if (event->state >= PERF_EVENT_STATE_INACTIVE)
2851 return 0;
2852
2853 __perf_event_mark_enabled(event);
2854
2855 return 1;
2856}
2857
2858
2859
2860
2861
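/*
 * Enable all events in the context that were created with
 * attr.enable_on_exec set.  Called from perf_event_comm(), i.e. around
 * exec time, so the context is expected to belong to current.
 */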
2862static void perf_event_enable_on_exec(struct perf_event_context *ctx)
2863{
2864 struct perf_event *event;
2865 unsigned long flags;
2866 int enabled = 0;
2867 int ret;
2868
2869 local_irq_save(flags);
2870 if (!ctx || !ctx->nr_events)
2871 goto out;
2872
2873
2874
2875
2876
2877
2878
2879
2880 perf_cgroup_sched_out(current, NULL);
2881
2882 raw_spin_lock(&ctx->lock);
2883 task_ctx_sched_out(ctx);
2884
2885 list_for_each_entry(event, &ctx->event_list, event_entry) {
2886 ret = event_enable_on_exec(event, ctx);
2887 if (ret)
2888 enabled = 1;
2889 }
2890
2891
2892
2893
2894 if (enabled)
2895 unclone_ctx(ctx);
2896
2897 raw_spin_unlock(&ctx->lock);
2898
2899
2900
2901
2902 perf_event_context_sched_in(ctx, ctx->task);
2903out:
2904 local_irq_restore(flags);
2905}
2906
2907
2908
2909
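/*
 * Cross-CPU call to read a hardware event on the CPU it is (or last was)
 * active on; bails out if the task context is no longer current there.
 */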
2910static void __perf_event_read(void *info)
2911{
2912 struct perf_event *event = info;
2913 struct perf_event_context *ctx = event->ctx;
2914 struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2915
2916
2917
2918
2919
2920
2921
2922
2923 if (ctx->task && cpuctx->task_ctx != ctx)
2924 return;
2925
2926 raw_spin_lock(&ctx->lock);
2927 if (ctx->is_active) {
2928 update_context_time(ctx);
2929 update_cgrp_time_from_event(event);
2930 }
2931 update_event_times(event);
2932 if (event->state == PERF_EVENT_STATE_ACTIVE)
2933 event->pmu->read(event);
2934 raw_spin_unlock(&ctx->lock);
2935}
2936
2937static inline u64 perf_event_count(struct perf_event *event)
2938{
2939 return local64_read(&event->count) + atomic64_read(&event->child_count);
2940}
2941
2942static u64 perf_event_read(struct perf_event *event)
2943{
2944
2945
2946
2947
2948 if (event->state == PERF_EVENT_STATE_ACTIVE) {
2949 smp_call_function_single(event->oncpu,
2950 __perf_event_read, event, 1);
2951 } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
2952 struct perf_event_context *ctx = event->ctx;
2953 unsigned long flags;
2954
2955 raw_spin_lock_irqsave(&ctx->lock, flags);
2956
2957
2958
2959
2960
2961 if (ctx->is_active) {
2962 update_context_time(ctx);
2963 update_cgrp_time_from_event(event);
2964 }
2965 update_event_times(event);
2966 raw_spin_unlock_irqrestore(&ctx->lock, flags);
2967 }
2968
2969 return perf_event_count(event);
2970}
2971
2972
2973
2974
2975static void __perf_event_init_context(struct perf_event_context *ctx)
2976{
2977 raw_spin_lock_init(&ctx->lock);
2978 mutex_init(&ctx->mutex);
2979 INIT_LIST_HEAD(&ctx->pinned_groups);
2980 INIT_LIST_HEAD(&ctx->flexible_groups);
2981 INIT_LIST_HEAD(&ctx->event_list);
2982 atomic_set(&ctx->refcount, 1);
2983}
2984
2985static struct perf_event_context *
2986alloc_perf_context(struct pmu *pmu, struct task_struct *task)
2987{
2988 struct perf_event_context *ctx;
2989
2990 ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
2991 if (!ctx)
2992 return NULL;
2993
2994 __perf_event_init_context(ctx);
2995 if (task) {
2996 ctx->task = task;
2997 get_task_struct(task);
2998 }
2999 ctx->pmu = pmu;
3000
3001 return ctx;
3002}
3003
3004static struct task_struct *
3005find_lively_task_by_vpid(pid_t vpid)
3006{
3007 struct task_struct *task;
3008 int err;
3009
3010 rcu_read_lock();
3011 if (!vpid)
3012 task = current;
3013 else
3014 task = find_task_by_vpid(vpid);
3015 if (task)
3016 get_task_struct(task);
3017 rcu_read_unlock();
3018
3019 if (!task)
3020 return ERR_PTR(-ESRCH);
3021
3022
3023 err = -EACCES;
3024 if (!ptrace_may_access(task, PTRACE_MODE_READ))
3025 goto errout;
3026
3027 return task;
3028errout:
3029 put_task_struct(task);
3030 return ERR_PTR(err);
3031
3032}
3033
3034
3035
3036
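/*
 * Find (or allocate) the perf_event_context for the given task or CPU and
 * PMU, returning it with an extra reference and pin count held, or an
 * ERR_PTR() on failure.
 */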
3037static struct perf_event_context *
3038find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
3039{
3040 struct perf_event_context *ctx;
3041 struct perf_cpu_context *cpuctx;
3042 unsigned long flags;
3043 int ctxn, err;
3044
3045 if (!task) {
3046
3047 if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
3048 return ERR_PTR(-EACCES);
3049
3050
3051
3052
3053
3054
3055 if (!cpu_online(cpu))
3056 return ERR_PTR(-ENODEV);
3057
3058 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
3059 ctx = &cpuctx->ctx;
3060 get_ctx(ctx);
3061 ++ctx->pin_count;
3062
3063 return ctx;
3064 }
3065
3066 err = -EINVAL;
3067 ctxn = pmu->task_ctx_nr;
3068 if (ctxn < 0)
3069 goto errout;
3070
3071retry:
3072 ctx = perf_lock_task_context(task, ctxn, &flags);
3073 if (ctx) {
3074 unclone_ctx(ctx);
3075 ++ctx->pin_count;
3076 raw_spin_unlock_irqrestore(&ctx->lock, flags);
3077 } else {
3078 ctx = alloc_perf_context(pmu, task);
3079 err = -ENOMEM;
3080 if (!ctx)
3081 goto errout;
3082
3083 err = 0;
3084 mutex_lock(&task->perf_event_mutex);
3085
3086
3087
3088
3089 if (task->flags & PF_EXITING)
3090 err = -ESRCH;
3091 else if (task->perf_event_ctxp[ctxn])
3092 err = -EAGAIN;
3093 else {
3094 get_ctx(ctx);
3095 ++ctx->pin_count;
3096 rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
3097 }
3098 mutex_unlock(&task->perf_event_mutex);
3099
3100 if (unlikely(err)) {
3101 put_ctx(ctx);
3102
3103 if (err == -EAGAIN)
3104 goto retry;
3105 goto errout;
3106 }
3107 }
3108
3109 return ctx;
3110
3111errout:
3112 return ERR_PTR(err);
3113}
3114
3115static void perf_event_free_filter(struct perf_event *event);
3116
3117static void free_event_rcu(struct rcu_head *head)
3118{
3119 struct perf_event *event;
3120
3121 event = container_of(head, struct perf_event, rcu_head);
3122 if (event->ns)
3123 put_pid_ns(event->ns);
3124 perf_event_free_filter(event);
3125 kfree(event);
3126}
3127
3128static void ring_buffer_put(struct ring_buffer *rb);
3129static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);
3130
3131static void free_event(struct perf_event *event)
3132{
3133 irq_work_sync(&event->pending);
3134
3135 if (!event->parent) {
3136 if (event->attach_state & PERF_ATTACH_TASK)
3137 static_key_slow_dec_deferred(&perf_sched_events);
3138 if (event->attr.mmap || event->attr.mmap_data)
3139 atomic_dec(&nr_mmap_events);
3140 if (event->attr.comm)
3141 atomic_dec(&nr_comm_events);
3142 if (event->attr.task)
3143 atomic_dec(&nr_task_events);
3144 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
3145 put_callchain_buffers();
3146 if (is_cgroup_event(event)) {
3147 atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
3148 static_key_slow_dec_deferred(&perf_sched_events);
3149 }
3150
3151 if (has_branch_stack(event)) {
3152 static_key_slow_dec_deferred(&perf_sched_events);
3153
3154 if (!(event->attach_state & PERF_ATTACH_TASK)) {
3155 atomic_dec(&per_cpu(perf_branch_stack_events,
3156 event->cpu));
3157 }
3158 }
3159 }
3160
3161 if (event->rb) {
3162 struct ring_buffer *rb;
3163
3164
3165
3166
3167
3168
3169
3170 mutex_lock(&event->mmap_mutex);
3171 rb = event->rb;
3172 if (rb) {
3173 rcu_assign_pointer(event->rb, NULL);
3174 ring_buffer_detach(event, rb);
3175 ring_buffer_put(rb);
3176 }
3177 mutex_unlock(&event->mmap_mutex);
3178 }
3179
3180 if (is_cgroup_event(event))
3181 perf_detach_cgroup(event);
3182
3183 if (event->destroy)
3184 event->destroy(event);
3185
3186 if (event->ctx)
3187 put_ctx(event->ctx);
3188
3189 call_rcu(&event->rcu_head, free_event_rcu);
3190}
3191
3192int perf_event_release_kernel(struct perf_event *event)
3193{
3194 struct perf_event_context *ctx = event->ctx;
3195
3196 WARN_ON_ONCE(ctx->parent_ctx);
3197
3198
3199
3200
3201
3202
3203
3204
3205
3206
3207
3208
3209 mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
3210 raw_spin_lock_irq(&ctx->lock);
3211 perf_group_detach(event);
3212 raw_spin_unlock_irq(&ctx->lock);
3213 perf_remove_from_context(event);
3214 mutex_unlock(&ctx->mutex);
3215
3216 free_event(event);
3217
3218 return 0;
3219}
3220EXPORT_SYMBOL_GPL(perf_event_release_kernel);
3221
3222
3223
3224
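/*
 * Drop a reference on the event; when the last reference goes away, unhook
 * it from its owner's list and release it via perf_event_release_kernel().
 */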
3225static void put_event(struct perf_event *event)
3226{
3227 struct task_struct *owner;
3228
3229 if (!atomic_long_dec_and_test(&event->refcount))
3230 return;
3231
3232 rcu_read_lock();
3233 owner = ACCESS_ONCE(event->owner);
3234
3235
3236
3237
3238
3239
3240 smp_read_barrier_depends();
3241 if (owner) {
3242
3243
3244
3245
3246
3247 get_task_struct(owner);
3248 }
3249 rcu_read_unlock();
3250
3251 if (owner) {
3252 mutex_lock(&owner->perf_event_mutex);
3253
3254
3255
3256
3257
3258
3259 if (event->owner)
3260 list_del_init(&event->owner_entry);
3261 mutex_unlock(&owner->perf_event_mutex);
3262 put_task_struct(owner);
3263 }
3264
3265 perf_event_release_kernel(event);
3266}
3267
3268static int perf_release(struct inode *inode, struct file *file)
3269{
3270 put_event(file->private_data);
3271 return 0;
3272}
3273
3274u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
3275{
3276 struct perf_event *child;
3277 u64 total = 0;
3278
3279 *enabled = 0;
3280 *running = 0;
3281
3282 mutex_lock(&event->child_mutex);
3283 total += perf_event_read(event);
3284 *enabled += event->total_time_enabled +
3285 atomic64_read(&event->child_total_time_enabled);
3286 *running += event->total_time_running +
3287 atomic64_read(&event->child_total_time_running);
3288
3289 list_for_each_entry(child, &event->child_list, child_list) {
3290 total += perf_event_read(child);
3291 *enabled += child->total_time_enabled;
3292 *running += child->total_time_running;
3293 }
3294 mutex_unlock(&event->child_mutex);
3295
3296 return total;
3297}
3298EXPORT_SYMBOL_GPL(perf_event_read_value);
3299
3300static int perf_event_read_group(struct perf_event *event,
3301 u64 read_format, char __user *buf)
3302{
3303 struct perf_event *leader = event->group_leader, *sub;
3304 int n = 0, size = 0, ret = -EFAULT;
3305 struct perf_event_context *ctx = leader->ctx;
3306 u64 values[5];
3307 u64 count, enabled, running;
3308
3309 mutex_lock(&ctx->mutex);
3310 count = perf_event_read_value(leader, &enabled, &running);
3311
3312 values[n++] = 1 + leader->nr_siblings;
3313 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3314 values[n++] = enabled;
3315 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3316 values[n++] = running;
3317 values[n++] = count;
3318 if (read_format & PERF_FORMAT_ID)
3319 values[n++] = primary_event_id(leader);
3320
3321 size = n * sizeof(u64);
3322
3323 if (copy_to_user(buf, values, size))
3324 goto unlock;
3325
3326 ret = size;
3327
3328 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
3329 n = 0;
3330
3331 values[n++] = perf_event_read_value(sub, &enabled, &running);
3332 if (read_format & PERF_FORMAT_ID)
3333 values[n++] = primary_event_id(sub);
3334
3335 size = n * sizeof(u64);
3336
3337 if (copy_to_user(buf + ret, values, size)) {
3338 ret = -EFAULT;
3339 goto unlock;
3340 }
3341
3342 ret += size;
3343 }
3344unlock:
3345 mutex_unlock(&ctx->mutex);
3346
3347 return ret;
3348}
3349
3350static int perf_event_read_one(struct perf_event *event,
3351 u64 read_format, char __user *buf)
3352{
3353 u64 enabled, running;
3354 u64 values[4];
3355 int n = 0;
3356
3357 values[n++] = perf_event_read_value(event, &enabled, &running);
3358 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3359 values[n++] = enabled;
3360 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3361 values[n++] = running;
3362 if (read_format & PERF_FORMAT_ID)
3363 values[n++] = primary_event_id(event);
3364
3365 if (copy_to_user(buf, values, n * sizeof(u64)))
3366 return -EFAULT;
3367
3368 return n * sizeof(u64);
3369}
3370
3371
3372
3373
3374static ssize_t
3375perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
3376{
3377 u64 read_format = event->attr.read_format;
3378 int ret;
3379
3380
3381
3382
3383
3384
3385 if (event->state == PERF_EVENT_STATE_ERROR)
3386 return 0;
3387
3388 if (count < event->read_size)
3389 return -ENOSPC;
3390
3391 WARN_ON_ONCE(event->ctx->parent_ctx);
3392 if (read_format & PERF_FORMAT_GROUP)
3393 ret = perf_event_read_group(event, read_format, buf);
3394 else
3395 ret = perf_event_read_one(event, read_format, buf);
3396
3397 return ret;
3398}
3399
3400static ssize_t
3401perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
3402{
3403 struct perf_event *event = file->private_data;
3404
3405 return perf_read_hw(event, buf, count);
3406}
3407
3408static unsigned int perf_poll(struct file *file, poll_table *wait)
3409{
3410 struct perf_event *event = file->private_data;
3411 struct ring_buffer *rb;
3412 unsigned int events = POLL_HUP;
3413
3414
3415
3416
3417
3418 mutex_lock(&event->mmap_mutex);
3419 rb = event->rb;
3420 if (rb)
3421 events = atomic_xchg(&rb->poll, 0);
3422 mutex_unlock(&event->mmap_mutex);
3423
3424 poll_wait(file, &event->waitq, wait);
3425
3426 return events;
3427}
3428
3429static void perf_event_reset(struct perf_event *event)
3430{
3431 (void)perf_event_read(event);
3432 local64_set(&event->count, 0);
3433 perf_event_update_userpage(event);
3434}
3435
3436
3437
3438
3439
3440
3441
3442static void perf_event_for_each_child(struct perf_event *event,
3443 void (*func)(struct perf_event *))
3444{
3445 struct perf_event *child;
3446
3447 WARN_ON_ONCE(event->ctx->parent_ctx);
3448 mutex_lock(&event->child_mutex);
3449 func(event);
3450 list_for_each_entry(child, &event->child_list, child_list)
3451 func(child);
3452 mutex_unlock(&event->child_mutex);
3453}
3454
3455static void perf_event_for_each(struct perf_event *event,
3456 void (*func)(struct perf_event *))
3457{
3458 struct perf_event_context *ctx = event->ctx;
3459 struct perf_event *sibling;
3460
3461 WARN_ON_ONCE(ctx->parent_ctx);
3462 mutex_lock(&ctx->mutex);
3463 event = event->group_leader;
3464
3465 perf_event_for_each_child(event, func);
3466 list_for_each_entry(sibling, &event->sibling_list, group_entry)
3467 perf_event_for_each_child(sibling, func);
3468 mutex_unlock(&ctx->mutex);
3469}
3470
3471static int perf_event_period(struct perf_event *event, u64 __user *arg)
3472{
3473 struct perf_event_context *ctx = event->ctx;
3474 int ret = 0;
3475 u64 value;
3476
3477 if (!is_sampling_event(event))
3478 return -EINVAL;
3479
3480 if (copy_from_user(&value, arg, sizeof(value)))
3481 return -EFAULT;
3482
3483 if (!value)
3484 return -EINVAL;
3485
3486 raw_spin_lock_irq(&ctx->lock);
3487 if (event->attr.freq) {
3488 if (value > sysctl_perf_event_sample_rate) {
3489 ret = -EINVAL;
3490 goto unlock;
3491 }
3492
3493 event->attr.sample_freq = value;
3494 } else {
3495 event->attr.sample_period = value;
3496 event->hw.sample_period = value;
3497 }
3498unlock:
3499 raw_spin_unlock_irq(&ctx->lock);
3500
3501 return ret;
3502}
3503
3504static const struct file_operations perf_fops;
3505
3506static inline int perf_fget_light(int fd, struct fd *p)
3507{
3508 struct fd f = fdget(fd);
3509 if (!f.file)
3510 return -EBADF;
3511
3512 if (f.file->f_op != &perf_fops) {
3513 fdput(f);
3514 return -EBADF;
3515 }
3516 *p = f;
3517 return 0;
3518}
3519
3520static int perf_event_set_output(struct perf_event *event,
3521 struct perf_event *output_event);
3522static int perf_event_set_filter(struct perf_event *event, void __user *arg);
3523
3524static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3525{
3526 struct perf_event *event = file->private_data;
3527 void (*func)(struct perf_event *);
3528 u32 flags = arg;
3529
3530 switch (cmd) {
3531 case PERF_EVENT_IOC_ENABLE:
3532 func = perf_event_enable;
3533 break;
3534 case PERF_EVENT_IOC_DISABLE:
3535 func = perf_event_disable;
3536 break;
3537 case PERF_EVENT_IOC_RESET:
3538 func = perf_event_reset;
3539 break;
3540
3541 case PERF_EVENT_IOC_REFRESH:
3542 return perf_event_refresh(event, arg);
3543
3544 case PERF_EVENT_IOC_PERIOD:
3545 return perf_event_period(event, (u64 __user *)arg);
3546
3547 case PERF_EVENT_IOC_SET_OUTPUT:
3548 {
3549 int ret;
3550 if (arg != -1) {
3551 struct perf_event *output_event;
3552 struct fd output;
3553 ret = perf_fget_light(arg, &output);
3554 if (ret)
3555 return ret;
3556 output_event = output.file->private_data;
3557 ret = perf_event_set_output(event, output_event);
3558 fdput(output);
3559 } else {
3560 ret = perf_event_set_output(event, NULL);
3561 }
3562 return ret;
3563 }
3564
3565 case PERF_EVENT_IOC_SET_FILTER:
3566 return perf_event_set_filter(event, (void __user *)arg);
3567
3568 default:
3569 return -ENOTTY;
3570 }
3571
3572 if (flags & PERF_IOC_FLAG_GROUP)
3573 perf_event_for_each(event, func);
3574 else
3575 perf_event_for_each_child(event, func);
3576
3577 return 0;
3578}
3579
3580int perf_event_task_enable(void)
3581{
3582 struct perf_event *event;
3583
3584 mutex_lock(&current->perf_event_mutex);
3585 list_for_each_entry(event, &current->perf_event_list, owner_entry)
3586 perf_event_for_each_child(event, perf_event_enable);
3587 mutex_unlock(&current->perf_event_mutex);
3584 mutex_lock(&current->perf_event_mutex);
3585 list_for_each_entry(event, &current->perf_event_list, owner_entry)
3586 perf_event_for_each_child(event, perf_event_enable);
3587 mutex_unlock(&current->perf_event_mutex);
3588
3589 return 0;
3590}
3591
3592int perf_event_task_disable(void)
3593{
3594 struct perf_event *event;
3595
3596 mutex_lock(&current->perf_event_mutex);
3597 list_for_each_entry(event, &current->perf_event_list, owner_entry)
3598 perf_event_for_each_child(event, perf_event_disable);
3599 mutex_unlock(&current->perf_event_mutex);
3600
3601 return 0;
3602}
3603
3604static int perf_event_index(struct perf_event *event)
3605{
3606 if (event->hw.state & PERF_HES_STOPPED)
3607 return 0;
3608
3609 if (event->state != PERF_EVENT_STATE_ACTIVE)
3610 return 0;
3611
3612 return event->pmu->event_idx(event);
3613}
3614
3615static void calc_timer_values(struct perf_event *event,
3616 u64 *now,
3617 u64 *enabled,
3618 u64 *running)
3619{
3620 u64 ctx_time;
3621
3622 *now = perf_clock();
3623 ctx_time = event->shadow_ctx_time + *now;
3624 *enabled = ctx_time - event->tstamp_enabled;
3625 *running = ctx_time - event->tstamp_running;
3626}
3627
3628void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
3629{
3630}
3631
3632
3633
3634
3635
3636
3637void perf_event_update_userpage(struct perf_event *event)
3638{
3639 struct perf_event_mmap_page *userpg;
3640 struct ring_buffer *rb;
3641 u64 enabled, running, now;
3642
3643 rcu_read_lock();
3644
3645
3646
3647
3648
3649
3650
3651
3652
3653 calc_timer_values(event, &now, &enabled, &running);
3654 rb = rcu_dereference(event->rb);
3655 if (!rb)
3656 goto unlock;
3657
3658 userpg = rb->user_page;
3659
3660
3661
3662
3663
3664 preempt_disable();
3665 ++userpg->lock;
3666 barrier();
3667 userpg->index = perf_event_index(event);
3668 userpg->offset = perf_event_count(event);
3669 if (userpg->index)
3670 userpg->offset -= local64_read(&event->hw.prev_count);
3671
3672 userpg->time_enabled = enabled +
3673 atomic64_read(&event->child_total_time_enabled);
3674
3675 userpg->time_running = running +
3676 atomic64_read(&event->child_total_time_running);
3677
3678 arch_perf_update_userpage(userpg, now);
3679
3680 barrier();
3681 ++userpg->lock;
3682 preempt_enable();
3683unlock:
3684 rcu_read_unlock();
3685}
3686
3687static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
3688{
3689 struct perf_event *event = vma->vm_file->private_data;
3690 struct ring_buffer *rb;
3691 int ret = VM_FAULT_SIGBUS;
3692
3693 if (vmf->flags & FAULT_FLAG_MKWRITE) {
3694 if (vmf->pgoff == 0)
3695 ret = 0;
3696 return ret;
3697 }
3698
3699 rcu_read_lock();
3700 rb = rcu_dereference(event->rb);
3701 if (!rb)
3702 goto unlock;
3703
3704 if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
3705 goto unlock;
3706
3707 vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
3708 if (!vmf->page)
3709 goto unlock;
3710
3711 get_page(vmf->page);
3712 vmf->page->mapping = vma->vm_file->f_mapping;
3713 vmf->page->index = vmf->pgoff;
3714
3715 ret = 0;
3716unlock:
3717 rcu_read_unlock();
3718
3719 return ret;
3720}
3721
3722static void ring_buffer_attach(struct perf_event *event,
3723 struct ring_buffer *rb)
3724{
3725 unsigned long flags;
3726
3727 if (!list_empty(&event->rb_entry))
3728 return;
3729
3730 spin_lock_irqsave(&rb->event_lock, flags);
3731 if (list_empty(&event->rb_entry))
3732 list_add(&event->rb_entry, &rb->event_list);
3733 spin_unlock_irqrestore(&rb->event_lock, flags);
3734}
3735
3736static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb)
3737{
3738 unsigned long flags;
3739
3740 if (list_empty(&event->rb_entry))
3741 return;
3742
3743 spin_lock_irqsave(&rb->event_lock, flags);
3744 list_del_init(&event->rb_entry);
3745 wake_up_all(&event->waitq);
3746 spin_unlock_irqrestore(&rb->event_lock, flags);
3747}
3748
3749static void ring_buffer_wakeup(struct perf_event *event)
3750{
3751 struct ring_buffer *rb;
3752
3753 rcu_read_lock();
3754 rb = rcu_dereference(event->rb);
3755 if (rb) {
3756 list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
3757 wake_up_all(&event->waitq);
3758 }
3759 rcu_read_unlock();
3760}
3761
3762static void rb_free_rcu(struct rcu_head *rcu_head)
3763{
3764 struct ring_buffer *rb;
3765
3766 rb = container_of(rcu_head, struct ring_buffer, rcu_head);
3767 rb_free(rb);
3768}
3769
3770static struct ring_buffer *ring_buffer_get(struct perf_event *event)
3771{
3772 struct ring_buffer *rb;
3773
3774 rcu_read_lock();
3775 rb = rcu_dereference(event->rb);
3776 if (rb) {
3777 if (!atomic_inc_not_zero(&rb->refcount))
3778 rb = NULL;
3779 }
3780 rcu_read_unlock();
3781
3782 return rb;
3783}
3784
3785static void ring_buffer_put(struct ring_buffer *rb)
3786{
3787 if (!atomic_dec_and_test(&rb->refcount))
3788 return;
3789
3790 WARN_ON_ONCE(!list_empty(&rb->event_list));
3791
3792 call_rcu(&rb->rcu_head, rb_free_rcu);
3793}
3794
3795static void perf_mmap_open(struct vm_area_struct *vma)
3796{
3797 struct perf_event *event = vma->vm_file->private_data;
3798
3799 atomic_inc(&event->mmap_count);
3800 atomic_inc(&event->rb->mmap_count);
3801}
3802
3803
3804
3805
3806
3807
3808
3809
3810
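/*
 * A ring buffer can be mapped by several events (see
 * PERF_EVENT_IOC_SET_OUTPUT); once the last mapping goes away we must
 * detach every event still pointing at the buffer and undo the
 * locked-memory accounting performed in perf_mmap().
 */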
3811static void perf_mmap_close(struct vm_area_struct *vma)
3812{
3813 struct perf_event *event = vma->vm_file->private_data;
3814
3815 struct ring_buffer *rb = event->rb;
3816 struct user_struct *mmap_user = rb->mmap_user;
3817 int mmap_locked = rb->mmap_locked;
3818 unsigned long size = perf_data_size(rb);
3819
3820 atomic_dec(&rb->mmap_count);
3821
3822 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
3823 return;
3824
3825
3826 rcu_assign_pointer(event->rb, NULL);
3827 ring_buffer_detach(event, rb);
3828 mutex_unlock(&event->mmap_mutex);
3829
3830
3831 if (atomic_read(&rb->mmap_count)) {
3832 ring_buffer_put(rb);
3833 return;
3834 }
3835
3836
3837
3838
3839
3840
3841again:
3842 rcu_read_lock();
3843 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
3844 if (!atomic_long_inc_not_zero(&event->refcount)) {
3845
3846
3847
3848
3849 continue;
3850 }
3851 rcu_read_unlock();
3852
3853 mutex_lock(&event->mmap_mutex);
3854
3855
3856
3857
3858
3859
3860
3861
3862
3863
3864 if (event->rb == rb) {
3865 rcu_assign_pointer(event->rb, NULL);
3866 ring_buffer_detach(event, rb);
3867 ring_buffer_put(rb);
3868 }
3869 mutex_unlock(&event->mmap_mutex);
3870 put_event(event);
3871
3872
3873
3874
3875
3876 goto again;
3877 }
3878 rcu_read_unlock();
3879
3880
3881
3882
3883
3884
3885
3886
3887
3888
3889 atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
3890 vma->vm_mm->pinned_vm -= mmap_locked;
3891 free_uid(mmap_user);
3892
3893 ring_buffer_put(rb);
3894}
3895
3896static const struct vm_operations_struct perf_mmap_vmops = {
3897 .open = perf_mmap_open,
3898 .close = perf_mmap_close,
3899 .fault = perf_mmap_fault,
3900 .page_mkwrite = perf_mmap_fault,
3901};
3902
3903static int perf_mmap(struct file *file, struct vm_area_struct *vma)
3904{
3905 struct perf_event *event = file->private_data;
3906 unsigned long user_locked, user_lock_limit;
3907 struct user_struct *user = current_user();
3908 unsigned long locked, lock_limit;
3909 struct ring_buffer *rb;
3910 unsigned long vma_size;
3911 unsigned long nr_pages;
3912 long user_extra, extra;
3913 int ret = 0, flags = 0;
3914
3915
3916
3917
3918
3919
3920 if (event->cpu == -1 && event->attr.inherit)
3921 return -EINVAL;
3922
3923 if (!(vma->vm_flags & VM_SHARED))
3924 return -EINVAL;
3925
3926 vma_size = vma->vm_end - vma->vm_start;
3927 nr_pages = (vma_size / PAGE_SIZE) - 1;
3928
3929
3930
3931
3932
3933 if (nr_pages != 0 && !is_power_of_2(nr_pages))
3934 return -EINVAL;
3935
3936 if (vma_size != PAGE_SIZE * (1 + nr_pages))
3937 return -EINVAL;
3938
3939 if (vma->vm_pgoff != 0)
3940 return -EINVAL;
3941
3942 WARN_ON_ONCE(event->ctx->parent_ctx);
3943again:
3944 mutex_lock(&event->mmap_mutex);
3945 if (event->rb) {
3946 if (event->rb->nr_pages != nr_pages) {
3947 ret = -EINVAL;
3948 goto unlock;
3949 }
3950
3951 if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
3952
3953
3954
3955
3956
3957 mutex_unlock(&event->mmap_mutex);
3958 goto again;
3959 }
3960
3961 goto unlock;
3962 }
3963
3964 user_extra = nr_pages + 1;
3965 user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
3966
3967
3968
3969
3970 user_lock_limit *= num_online_cpus();
3971
3972 user_locked = atomic_long_read(&user->locked_vm) + user_extra;
3973
3974 extra = 0;
3975 if (user_locked > user_lock_limit)
3976 extra = user_locked - user_lock_limit;
3977
3978 lock_limit = rlimit(RLIMIT_MEMLOCK);
3979 lock_limit >>= PAGE_SHIFT;
3980 locked = vma->vm_mm->pinned_vm + extra;
3981
3982 if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
3983 !capable(CAP_IPC_LOCK)) {
3984 ret = -EPERM;
3985 goto unlock;
3986 }
3987
3988 WARN_ON(event->rb);
3989
3990 if (vma->vm_flags & VM_WRITE)
3991 flags |= RING_BUFFER_WRITABLE;
3992
3993 rb = rb_alloc(nr_pages,
3994 event->attr.watermark ? event->attr.wakeup_watermark : 0,
3995 event->cpu, flags);
3996
3997 if (!rb) {
3998 ret = -ENOMEM;
3999 goto unlock;
4000 }
4001
4002 atomic_set(&rb->mmap_count, 1);
4003 rb->mmap_locked = extra;
4004 rb->mmap_user = get_current_user();
4005
4006 atomic_long_add(user_extra, &user->locked_vm);
4007 vma->vm_mm->pinned_vm += extra;
4008
4009 ring_buffer_attach(event, rb);
4010 rcu_assign_pointer(event->rb, rb);
4011
4012 perf_event_update_userpage(event);
4013
4014unlock:
4015 if (!ret)
4016 atomic_inc(&event->mmap_count);
4017 mutex_unlock(&event->mmap_mutex);
4018
4019
4020
4021
4022
4023 vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
4024 vma->vm_ops = &perf_mmap_vmops;
4025
4026 return ret;
4027}
4028
4029static int perf_fasync(int fd, struct file *filp, int on)
4030{
4031 struct inode *inode = file_inode(filp);
4032 struct perf_event *event = filp->private_data;
4033 int retval;
4034
4035 mutex_lock(&inode->i_mutex);
4036 retval = fasync_helper(fd, filp, on, &event->fasync);
4037 mutex_unlock(&inode->i_mutex);
4038
4039 if (retval < 0)
4040 return retval;
4041
4042 return 0;
4043}
4044
4045static const struct file_operations perf_fops = {
4046 .llseek = no_llseek,
4047 .release = perf_release,
4048 .read = perf_read,
4049 .poll = perf_poll,
4050 .unlocked_ioctl = perf_ioctl,
4051 .compat_ioctl = perf_ioctl,
4052 .mmap = perf_mmap,
4053 .fasync = perf_fasync,
4054};
4055
4056
4057
4058
4059
4060
4061
4062
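/*
 * Wake up anyone waiting on the event's ring buffer (poll()) and deliver
 * any pending SIGIO via fasync.
 */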
4063void perf_event_wakeup(struct perf_event *event)
4064{
4065 ring_buffer_wakeup(event);
4066
4067 if (event->pending_kill) {
4068 kill_fasync(&event->fasync, SIGIO, event->pending_kill);
4069 event->pending_kill = 0;
4070 }
4071}
4072
4073static void perf_pending_event(struct irq_work *entry)
4074{
4075 struct perf_event *event = container_of(entry,
4076 struct perf_event, pending);
4077
4078 if (event->pending_disable) {
4079 event->pending_disable = 0;
4080 __perf_event_disable(event);
4081 }
4082
4083 if (event->pending_wakeup) {
4084 event->pending_wakeup = 0;
4085 perf_event_wakeup(event);
4086 }
4087}
4088
4089
4090
4091
4092
4093
4094struct perf_guest_info_callbacks *perf_guest_cbs;
4095
4096int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
4097{
4098 perf_guest_cbs = cbs;
4099 return 0;
4100}
4101EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
4102
4103int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
4104{
4105 perf_guest_cbs = NULL;
4106 return 0;
4107}
4108EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
4109
4110static void
4111perf_output_sample_regs(struct perf_output_handle *handle,
4112 struct pt_regs *regs, u64 mask)
4113{
4114 int bit;
4115
4116 for_each_set_bit(bit, (const unsigned long *) &mask,
4117 sizeof(mask) * BITS_PER_BYTE) {
4118 u64 val;
4119
4120 val = perf_reg_value(regs, bit);
4121 perf_output_put(handle, val);
4122 }
4123}
4124
4125static void perf_sample_regs_user(struct perf_regs_user *regs_user,
4126 struct pt_regs *regs)
4127{
4128 if (!user_mode(regs)) {
4129 if (current->mm)
4130 regs = task_pt_regs(current);
4131 else
4132 regs = NULL;
4133 }
4134
4135 if (regs) {
4136 regs_user->regs = regs;
4137 regs_user->abi = perf_reg_abi(current);
4138 }
4139}
4140
4141
4142
4143
4144
4145
4146
4147
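/*
 * Amount of user stack that can still be dumped: everything between the
 * current user stack pointer and TASK_SIZE.
 */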
4148static u64 perf_ustack_task_size(struct pt_regs *regs)
4149{
4150 unsigned long addr = perf_user_stack_pointer(regs);
4151
4152 if (!addr || addr >= TASK_SIZE)
4153 return 0;
4154
4155 return TASK_SIZE - addr;
4156}
4157
4158static u16
4159perf_sample_ustack_size(u16 stack_size, u16 header_size,
4160 struct pt_regs *regs)
4161{
4162 u64 task_size;
4163
4164
4165 if (!regs)
4166 return 0;
4167
4168
4169
4170
4171
4172
4173
4174
4175
4176
4177
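 /*
  * Clamp the requested dump size to what the task can actually provide,
  * then make sure the overall sample size still fits in the u16
  * header->size field.
  */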
4178 task_size = min((u64) USHRT_MAX, perf_ustack_task_size(regs));
4179 stack_size = min(stack_size, (u16) task_size);
4180
4181
4182 header_size += 2 * sizeof(u64);
4183
4184
4185 if ((u16) (header_size + stack_size) < header_size) {
4186
4187
4188
4189
4190 stack_size = USHRT_MAX - header_size - sizeof(u64);
4191 stack_size = round_up(stack_size, sizeof(u64));
4192 }
4193
4194 return stack_size;
4195}
4196
4197static void
4198perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
4199 struct pt_regs *regs)
4200{
4201
4202 if (!regs) {
4203 u64 size = 0;
4204 perf_output_put(handle, size);
4205 } else {
4206 unsigned long sp;
4207 unsigned int rem;
4208 u64 dyn_size;
4209
4210
4211
4212
4213
4214
4215
4216
4217
4218
4219
4220
4221
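 /*
  * Record layout: the static dump size, the user stack bytes (any tail
  * we failed to copy is skipped over), and finally the dynamic size that
  * was actually captured.
  */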
4222 perf_output_put(handle, dump_size);
4223
4224
4225 sp = perf_user_stack_pointer(regs);
4226 rem = __output_copy_user(handle, (void *) sp, dump_size);
4227 dyn_size = dump_size - rem;
4228
4229 perf_output_skip(handle, rem);
4230
4231
4232 perf_output_put(handle, dyn_size);
4233 }
4234}
4235
4236static void __perf_event_header__init_id(struct perf_event_header *header,
4237 struct perf_sample_data *data,
4238 struct perf_event *event)
4239{
4240 u64 sample_type = event->attr.sample_type;
4241
4242 data->type = sample_type;
4243 header->size += event->id_header_size;
4244
4245 if (sample_type & PERF_SAMPLE_TID) {
4246
4247 data->tid_entry.pid = perf_event_pid(event, current);
4248 data->tid_entry.tid = perf_event_tid(event, current);
4249 }
4250
4251 if (sample_type & PERF_SAMPLE_TIME)
4252 data->time = perf_clock();
4253
4254 if (sample_type & PERF_SAMPLE_ID)
4255 data->id = primary_event_id(event);
4256
4257 if (sample_type & PERF_SAMPLE_STREAM_ID)
4258 data->stream_id = event->id;
4259
4260 if (sample_type & PERF_SAMPLE_CPU) {
4261 data->cpu_entry.cpu = raw_smp_processor_id();
4262 data->cpu_entry.reserved = 0;
4263 }
4264}
4265
4266void perf_event_header__init_id(struct perf_event_header *header,
4267 struct perf_sample_data *data,
4268 struct perf_event *event)
4269{
4270 if (event->attr.sample_id_all)
4271 __perf_event_header__init_id(header, data, event);
4272}
4273
4274static void __perf_event__output_id_sample(struct perf_output_handle *handle,
4275 struct perf_sample_data *data)
4276{
4277 u64 sample_type = data->type;
4278
4279 if (sample_type & PERF_SAMPLE_TID)
4280 perf_output_put(handle, data->tid_entry);
4281
4282 if (sample_type & PERF_SAMPLE_TIME)
4283 perf_output_put(handle, data->time);
4284
4285 if (sample_type & PERF_SAMPLE_ID)
4286 perf_output_put(handle, data->id);
4287
4288 if (sample_type & PERF_SAMPLE_STREAM_ID)
4289 perf_output_put(handle, data->stream_id);
4290
4291 if (sample_type & PERF_SAMPLE_CPU)
4292 perf_output_put(handle, data->cpu_entry);
4293}
4294
4295void perf_event__output_id_sample(struct perf_event *event,
4296 struct perf_output_handle *handle,
4297 struct perf_sample_data *sample)
4298{
4299 if (event->attr.sample_id_all)
4300 __perf_event__output_id_sample(handle, sample);
4301}
4302
4303static void perf_output_read_one(struct perf_output_handle *handle,
4304 struct perf_event *event,
4305 u64 enabled, u64 running)
4306{
4307 u64 read_format = event->attr.read_format;
4308 u64 values[4];
4309 int n = 0;
4310
4311 values[n++] = perf_event_count(event);
4312 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
4313 values[n++] = enabled +
4314 atomic64_read(&event->child_total_time_enabled);
4315 }
4316 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
4317 values[n++] = running +
4318 atomic64_read(&event->child_total_time_running);
4319 }
4320 if (read_format & PERF_FORMAT_ID)
4321 values[n++] = primary_event_id(event);
4322
4323 __output_copy(handle, values, n * sizeof(u64));
4324}
4325
4326
4327
4328
4329static void perf_output_read_group(struct perf_output_handle *handle,
4330 struct perf_event *event,
4331 u64 enabled, u64 running)
4332{
4333 struct perf_event *leader = event->group_leader, *sub;
4334 u64 read_format = event->attr.read_format;
4335 u64 values[5];
4336 int n = 0;
4337
4338 values[n++] = 1 + leader->nr_siblings;
4339
4340 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
4341 values[n++] = enabled;
4342
4343 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
4344 values[n++] = running;
4345
4346 if (leader != event)
4347 leader->pmu->read(leader);
4348
4349 values[n++] = perf_event_count(leader);
4350 if (read_format & PERF_FORMAT_ID)
4351 values[n++] = primary_event_id(leader);
4352
4353 __output_copy(handle, values, n * sizeof(u64));
4354
4355 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
4356 n = 0;
4357
4358 if (sub != event)
4359 sub->pmu->read(sub);
4360
4361 values[n++] = perf_event_count(sub);
4362 if (read_format & PERF_FORMAT_ID)
4363 values[n++] = primary_event_id(sub);
4364
4365 __output_copy(handle, values, n * sizeof(u64));
4366 }
4367}
4368
4369#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
4370 PERF_FORMAT_TOTAL_TIME_RUNNING)
4371
4372static void perf_output_read(struct perf_output_handle *handle,
4373 struct perf_event *event)
4374{
4375 u64 enabled = 0, running = 0, now;
4376 u64 read_format = event->attr.read_format;
4377
4378
4379
4380
4381
4382
4383
4384
4385
4386
4387 if (read_format & PERF_FORMAT_TOTAL_TIMES)
4388 calc_timer_values(event, &now, &enabled, &running);
4389
4390 if (event->attr.read_format & PERF_FORMAT_GROUP)
4391 perf_output_read_group(handle, event, enabled, running);
4392 else
4393 perf_output_read_one(handle, event, enabled, running);
4394}
4395
4396void perf_output_sample(struct perf_output_handle *handle,
4397 struct perf_event_header *header,
4398 struct perf_sample_data *data,
4399 struct perf_event *event)
4400{
4401 u64 sample_type = data->type;
4402
4403 perf_output_put(handle, *header);
4404
4405 if (sample_type & PERF_SAMPLE_IP)
4406 perf_output_put(handle, data->ip);
4407
4408 if (sample_type & PERF_SAMPLE_TID)
4409 perf_output_put(handle, data->tid_entry);
4410
4411 if (sample_type & PERF_SAMPLE_TIME)
4412 perf_output_put(handle, data->time);
4413
4414 if (sample_type & PERF_SAMPLE_ADDR)
4415 perf_output_put(handle, data->addr);
4416
4417 if (sample_type & PERF_SAMPLE_ID)
4418 perf_output_put(handle, data->id);
4419
4420 if (sample_type & PERF_SAMPLE_STREAM_ID)
4421 perf_output_put(handle, data->stream_id);
4422
4423 if (sample_type & PERF_SAMPLE_CPU)
4424 perf_output_put(handle, data->cpu_entry);
4425
4426 if (sample_type & PERF_SAMPLE_PERIOD)
4427 perf_output_put(handle, data->period);
4428
4429 if (sample_type & PERF_SAMPLE_READ)
4430 perf_output_read(handle, event);
4431
4432 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
4433 if (data->callchain) {
4434 int size = 1;
4435
4436 if (data->callchain)
4437 size += data->callchain->nr;
4438
4439 size *= sizeof(u64);
4440
4441 __output_copy(handle, data->callchain, size);
4442 } else {
4443 u64 nr = 0;
4444 perf_output_put(handle, nr);
4445 }
4446 }
4447
4448 if (sample_type & PERF_SAMPLE_RAW) {
4449 if (data->raw) {
4450 perf_output_put(handle, data->raw->size);
4451 __output_copy(handle, data->raw->data,
4452 data->raw->size);
4453 } else {
4454 struct {
4455 u32 size;
4456 u32 data;
4457 } raw = {
4458 .size = sizeof(u32),
4459 .data = 0,
4460 };
4461 perf_output_put(handle, raw);
4462 }
4463 }
4464
4465 if (!event->attr.watermark) {
4466 int wakeup_events = event->attr.wakeup_events;
4467
4468 if (wakeup_events) {
4469 struct ring_buffer *rb = handle->rb;
4470 int events = local_inc_return(&rb->events);
4471
4472 if (events >= wakeup_events) {
4473 local_sub(wakeup_events, &rb->events);
4474 local_inc(&rb->wakeup);
4475 }
4476 }
4477 }
4478
4479 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
4480 if (data->br_stack) {
4481 size_t size;
4482
4483 size = data->br_stack->nr
4484 * sizeof(struct perf_branch_entry);
4485
4486 perf_output_put(handle, data->br_stack->nr);
4487 perf_output_copy(handle, data->br_stack->entries, size);
4488 } else {
4489
4490
4491
4492 u64 nr = 0;
4493 perf_output_put(handle, nr);
4494 }
4495 }
4496
4497 if (sample_type & PERF_SAMPLE_REGS_USER) {
4498 u64 abi = data->regs_user.abi;
4499
4500
4501
4502
4503
4504 perf_output_put(handle, abi);
4505
4506 if (abi) {
4507 u64 mask = event->attr.sample_regs_user;
4508 perf_output_sample_regs(handle,
4509 data->regs_user.regs,
4510 mask);
4511 }
4512 }
4513
4514 if (sample_type & PERF_SAMPLE_STACK_USER)
4515 perf_output_sample_ustack(handle,
4516 data->stack_user_size,
4517 data->regs_user.regs);
4518
4519 if (sample_type & PERF_SAMPLE_WEIGHT)
4520 perf_output_put(handle, data->weight);
4521
4522 if (sample_type & PERF_SAMPLE_DATA_SRC)
4523 perf_output_put(handle, data->data_src.val);
4524}
4525
4526void perf_prepare_sample(struct perf_event_header *header,
4527 struct perf_sample_data *data,
4528 struct perf_event *event,
4529 struct pt_regs *regs)
4530{
4531 u64 sample_type = event->attr.sample_type;
4532
4533 header->type = PERF_RECORD_SAMPLE;
4534 header->size = sizeof(*header) + event->header_size;
4535
4536 header->misc = 0;
4537 header->misc |= perf_misc_flags(regs);
4538
4539 __perf_event_header__init_id(header, data, event);
4540
4541 if (sample_type & PERF_SAMPLE_IP)
4542 data->ip = perf_instruction_pointer(regs);
4543
4544 if (sample_type & PERF_SAMPLE_CALLCHAIN) {
4545 int size = 1;
4546
4547 data->callchain = perf_callchain(event, regs);
4548
4549 if (data->callchain)
4550 size += data->callchain->nr;
4551
4552 header->size += size * sizeof(u64);
4553 }
4554
4555 if (sample_type & PERF_SAMPLE_RAW) {
4556 int size = sizeof(u32);
4557
4558 if (data->raw)
4559 size += data->raw->size;
4560 else
4561 size += sizeof(u32);
4562
4563 WARN_ON_ONCE(size & (sizeof(u64)-1));
4564 header->size += size;
4565 }
4566
4567 if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
4568 int size = sizeof(u64);
4569 if (data->br_stack) {
4570 size += data->br_stack->nr
4571 * sizeof(struct perf_branch_entry);
4572 }
4573 header->size += size;
4574 }
4575
4576 if (sample_type & PERF_SAMPLE_REGS_USER) {
4577
4578 int size = sizeof(u64);
4579
4580 perf_sample_regs_user(&data->regs_user, regs);
4581
4582 if (data->regs_user.regs) {
4583 u64 mask = event->attr.sample_regs_user;
4584 size += hweight64(mask) * sizeof(u64);
4585 }
4586
4587 header->size += size;
4588 }
4589
4590 if (sample_type & PERF_SAMPLE_STACK_USER) {
4591
4592
4593
4594
4595
4596
4597 struct perf_regs_user *uregs = &data->regs_user;
4598 u16 stack_size = event->attr.sample_stack_user;
4599 u16 size = sizeof(u64);
4600
4601 if (!uregs->abi)
4602 perf_sample_regs_user(uregs, regs);
4603
4604 stack_size = perf_sample_ustack_size(stack_size, header->size,
4605 uregs->regs);
4606
4607
4608
4609
4610
4611
4612 if (stack_size)
4613 size += sizeof(u64) + stack_size;
4614
4615 data->stack_user_size = stack_size;
4616 header->size += size;
4617 }
4618}
4619
4620static void perf_event_output(struct perf_event *event,
4621 struct perf_sample_data *data,
4622 struct pt_regs *regs)
4623{
4624 struct perf_output_handle handle;
4625 struct perf_event_header header;
4626
4627
4628 rcu_read_lock();
4629
4630 perf_prepare_sample(&header, data, event, regs);
4631
4632 if (perf_output_begin(&handle, event, header.size))
4633 goto exit;
4634
4635 perf_output_sample(&handle, &header, data, event);
4636
4637 perf_output_end(&handle);
4638
4639exit:
4640 rcu_read_unlock();
4641}
4642
4643
4644
4645
4646
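/*
 * read event notification: emit a PERF_RECORD_READ record carrying the
 * current counter value(s) for an event, attributed to the given task.
 */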
4647struct perf_read_event {
4648 struct perf_event_header header;
4649
4650 u32 pid;
4651 u32 tid;
4652};
4653
4654static void
4655perf_event_read_event(struct perf_event *event,
4656 struct task_struct *task)
4657{
4658 struct perf_output_handle handle;
4659 struct perf_sample_data sample;
4660 struct perf_read_event read_event = {
4661 .header = {
4662 .type = PERF_RECORD_READ,
4663 .misc = 0,
4664 .size = sizeof(read_event) + event->read_size,
4665 },
4666 .pid = perf_event_pid(event, task),
4667 .tid = perf_event_tid(event, task),
4668 };
4669 int ret;
4670
4671 perf_event_header__init_id(&read_event.header, &sample, event);
4672 ret = perf_output_begin(&handle, event, read_event.header.size);
4673 if (ret)
4674 return;
4675
4676 perf_output_put(&handle, read_event);
4677 perf_output_read(&handle, event);
4678 perf_event__output_id_sample(event, &handle, &sample);
4679
4680 perf_output_end(&handle);
4681}
4682
4683typedef int (perf_event_aux_match_cb)(struct perf_event *event, void *data);
4684typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data);
4685
4686static void
4687perf_event_aux_ctx(struct perf_event_context *ctx,
4688 perf_event_aux_match_cb match,
4689 perf_event_aux_output_cb output,
4690 void *data)
4691{
4692 struct perf_event *event;
4693
4694 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
4695 if (event->state < PERF_EVENT_STATE_INACTIVE)
4696 continue;
4697 if (!event_filter_match(event))
4698 continue;
4699 if (match(event, data))
4700 output(event, data);
4701 }
4702}
4703
4704static void
4705perf_event_aux(perf_event_aux_match_cb match,
4706 perf_event_aux_output_cb output,
4707 void *data,
4708 struct perf_event_context *task_ctx)
4709{
4710 struct perf_cpu_context *cpuctx;
4711 struct perf_event_context *ctx;
4712 struct pmu *pmu;
4713 int ctxn;
4714
4715 rcu_read_lock();
4716 list_for_each_entry_rcu(pmu, &pmus, entry) {
4717 cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
4718 if (cpuctx->unique_pmu != pmu)
4719 goto next;
4720 perf_event_aux_ctx(&cpuctx->ctx, match, output, data);
4721 if (task_ctx)
4722 goto next;
4723 ctxn = pmu->task_ctx_nr;
4724 if (ctxn < 0)
4725 goto next;
4726 ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4727 if (ctx)
4728 perf_event_aux_ctx(ctx, match, output, data);
4729next:
4730 put_cpu_ptr(pmu->pmu_cpu_context);
4731 }
4732
4733 if (task_ctx) {
4734 preempt_disable();
4735 perf_event_aux_ctx(task_ctx, match, output, data);
4736 preempt_enable();
4737 }
4738 rcu_read_unlock();
4739}
4740
4741
4742
4743
4744
4745
4746
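/*
 * task tracking -- fork/exit: emits PERF_RECORD_FORK / PERF_RECORD_EXIT
 * records to events that requested comm, mmap or task notifications.
 */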
4747struct perf_task_event {
4748 struct task_struct *task;
4749 struct perf_event_context *task_ctx;
4750
4751 struct {
4752 struct perf_event_header header;
4753
4754 u32 pid;
4755 u32 ppid;
4756 u32 tid;
4757 u32 ptid;
4758 u64 time;
4759 } event_id;
4760};
4761
4762static void perf_event_task_output(struct perf_event *event,
4763 void *data)
4764{
4765 struct perf_task_event *task_event = data;
4766 struct perf_output_handle handle;
4767 struct perf_sample_data sample;
4768 struct task_struct *task = task_event->task;
4769 int ret, size = task_event->event_id.header.size;
4770
4771 perf_event_header__init_id(&task_event->event_id.header, &sample, event);
4772
4773 ret = perf_output_begin(&handle, event,
4774 task_event->event_id.header.size);
4775 if (ret)
4776 goto out;
4777
4778 task_event->event_id.pid = perf_event_pid(event, task);
4779 task_event->event_id.ppid = perf_event_pid(event, current);
4780
4781 task_event->event_id.tid = perf_event_tid(event, task);
4782 task_event->event_id.ptid = perf_event_tid(event, current);
4783
4784 perf_output_put(&handle, task_event->event_id);
4785
4786 perf_event__output_id_sample(event, &handle, &sample);
4787
4788 perf_output_end(&handle);
4789out:
4790 task_event->event_id.header.size = size;
4791}
4792
4793static int perf_event_task_match(struct perf_event *event,
4794 void *data __maybe_unused)
4795{
4796 return event->attr.comm || event->attr.mmap ||
4797 event->attr.mmap_data || event->attr.task;
4798}
4799
4800static void perf_event_task(struct task_struct *task,
4801 struct perf_event_context *task_ctx,
4802 int new)
4803{
4804 struct perf_task_event task_event;
4805
4806 if (!atomic_read(&nr_comm_events) &&
4807 !atomic_read(&nr_mmap_events) &&
4808 !atomic_read(&nr_task_events))
4809 return;
4810
4811 task_event = (struct perf_task_event){
4812 .task = task,
4813 .task_ctx = task_ctx,
4814 .event_id = {
4815 .header = {
4816 .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
4817 .misc = 0,
4818 .size = sizeof(task_event.event_id),
4819 },
4820
4821
4822
4823
4824 .time = perf_clock(),
4825 },
4826 };
4827
4828 perf_event_aux(perf_event_task_match,
4829 perf_event_task_output,
4830 &task_event,
4831 task_ctx);
4832}
4833
4834void perf_event_fork(struct task_struct *task)
4835{
4836 perf_event_task(task, NULL, 1);
4837}
4838
4839
4840
4841
4842
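/*
 * comm tracking: PERF_RECORD_COMM records describing task->comm changes.
 */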
4843struct perf_comm_event {
4844 struct task_struct *task;
4845 char *comm;
4846 int comm_size;
4847
4848 struct {
4849 struct perf_event_header header;
4850
4851 u32 pid;
4852 u32 tid;
4853 } event_id;
4854};
4855
4856static void perf_event_comm_output(struct perf_event *event,
4857 void *data)
4858{
4859 struct perf_comm_event *comm_event = data;
4860 struct perf_output_handle handle;
4861 struct perf_sample_data sample;
4862 int size = comm_event->event_id.header.size;
4863 int ret;
4864
4865 perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
4866 ret = perf_output_begin(&handle, event,
4867 comm_event->event_id.header.size);
4868
4869 if (ret)
4870 goto out;
4871
4872 comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
4873 comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
4874
4875 perf_output_put(&handle, comm_event->event_id);
4876 __output_copy(&handle, comm_event->comm,
4877 comm_event->comm_size);
4878
4879 perf_event__output_id_sample(event, &handle, &sample);
4880
4881 perf_output_end(&handle);
4882out:
4883 comm_event->event_id.header.size = size;
4884}
4885
4886static int perf_event_comm_match(struct perf_event *event,
4887 void *data __maybe_unused)
4888{
4889 return event->attr.comm;
4890}
4891
4892static void perf_event_comm_event(struct perf_comm_event *comm_event)
4893{
4894 char comm[TASK_COMM_LEN];
4895 unsigned int size;
4896
4897 memset(comm, 0, sizeof(comm));
4898 strlcpy(comm, comm_event->task->comm, sizeof(comm));
4899 size = ALIGN(strlen(comm)+1, sizeof(u64));
4900
4901 comm_event->comm = comm;
4902 comm_event->comm_size = size;
4903
4904 comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
4905
4906 perf_event_aux(perf_event_comm_match,
4907 perf_event_comm_output,
4908 comm_event,
4909 NULL);
4910}
4911
4912void perf_event_comm(struct task_struct *task)
4913{
4914 struct perf_comm_event comm_event;
4915 struct perf_event_context *ctx;
4916 int ctxn;
4917
4918 rcu_read_lock();
4919 for_each_task_context_nr(ctxn) {
4920 ctx = task->perf_event_ctxp[ctxn];
4921 if (!ctx)
4922 continue;
4923
4924 perf_event_enable_on_exec(ctx);
4925 }
4926 rcu_read_unlock();
4927
4928 if (!atomic_read(&nr_comm_events))
4929 return;
4930
4931 comm_event = (struct perf_comm_event){
4932 .task = task,
4933
4934
4935 .event_id = {
4936 .header = {
4937 .type = PERF_RECORD_COMM,
4938 .misc = 0,
4939
4940 },
4941
4942
4943 },
4944 };
4945
4946 perf_event_comm_event(&comm_event);
4947}
4948
4949
4950
4951
4952
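/*
 * mmap tracking: PERF_RECORD_MMAP records describing new executable or
 * data mappings.
 */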
4953struct perf_mmap_event {
4954 struct vm_area_struct *vma;
4955
4956 const char *file_name;
4957 int file_size;
4958
4959 struct {
4960 struct perf_event_header header;
4961
4962 u32 pid;
4963 u32 tid;
4964 u64 start;
4965 u64 len;
4966 u64 pgoff;
4967 } event_id;
4968};
4969
4970static void perf_event_mmap_output(struct perf_event *event,
4971 void *data)
4972{
4973 struct perf_mmap_event *mmap_event = data;
4974 struct perf_output_handle handle;
4975 struct perf_sample_data sample;
4976 int size = mmap_event->event_id.header.size;
4977 int ret;
4978
4979 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
4980 ret = perf_output_begin(&handle, event,
4981 mmap_event->event_id.header.size);
4982 if (ret)
4983 goto out;
4984
4985 mmap_event->event_id.pid = perf_event_pid(event, current);
4986 mmap_event->event_id.tid = perf_event_tid(event, current);
4987
4988 perf_output_put(&handle, mmap_event->event_id);
4989 __output_copy(&handle, mmap_event->file_name,
4990 mmap_event->file_size);
4991
4992 perf_event__output_id_sample(event, &handle, &sample);
4993
4994 perf_output_end(&handle);
4995out:
4996 mmap_event->event_id.header.size = size;
4997}
4998
4999static int perf_event_mmap_match(struct perf_event *event,
5000 void *data)
5001{
5002 struct perf_mmap_event *mmap_event = data;
5003 struct vm_area_struct *vma = mmap_event->vma;
5004 int executable = vma->vm_flags & VM_EXEC;
5005
5006 return (!executable && event->attr.mmap_data) ||
5007 (executable && event->attr.mmap);
5008}
5009
5010static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
5011{
5012 struct vm_area_struct *vma = mmap_event->vma;
5013 struct file *file = vma->vm_file;
5014 unsigned int size;
5015 char tmp[16];
5016 char *buf = NULL;
5017 const char *name;
5018
5019 memset(tmp, 0, sizeof(tmp));
5020
5021 if (file) {
5022
5023
5024
5025
5026
5027 buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
5028 if (!buf) {
5029 name = strncpy(tmp, "//enomem", sizeof(tmp));
5030 goto got_name;
5031 }
5032 name = d_path(&file->f_path, buf, PATH_MAX);
5033 if (IS_ERR(name)) {
5034 name = strncpy(tmp, "//toolong", sizeof(tmp));
5035 goto got_name;
5036 }
5037 } else {
5038 if (arch_vma_name(mmap_event->vma)) {
5039 name = strncpy(tmp, arch_vma_name(mmap_event->vma),
5040 sizeof(tmp) - 1);
5041 tmp[sizeof(tmp) - 1] = '\0';
5042 goto got_name;
5043 }
5044
5045 if (!vma->vm_mm) {
5046 name = strncpy(tmp, "[vdso]", sizeof(tmp));
5047 goto got_name;
5048 } else if (vma->vm_start <= vma->vm_mm->start_brk &&
5049 vma->vm_end >= vma->vm_mm->brk) {
5050 name = strncpy(tmp, "[heap]", sizeof(tmp));
5051 goto got_name;
5052 } else if (vma->vm_start <= vma->vm_mm->start_stack &&
5053 vma->vm_end >= vma->vm_mm->start_stack) {
5054 name = strncpy(tmp, "[stack]", sizeof(tmp));
5055 goto got_name;
5056 }
5057
5058 name = strncpy(tmp, "//anon", sizeof(tmp));
5059 goto got_name;
5060 }
5061
5062got_name:
5063 size = ALIGN(strlen(name)+1, sizeof(u64));
5064
5065 mmap_event->file_name = name;
5066 mmap_event->file_size = size;
5067
5068 if (!(vma->vm_flags & VM_EXEC))
5069 mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA;
5070
5071 mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
5072
5073 perf_event_aux(perf_event_mmap_match,
5074 perf_event_mmap_output,
5075 mmap_event,
5076 NULL);
5077
5078 kfree(buf);
5079}
5080
5081void perf_event_mmap(struct vm_area_struct *vma)
5082{
5083 struct perf_mmap_event mmap_event;
5084
5085 if (!atomic_read(&nr_mmap_events))
5086 return;
5087
5088 mmap_event = (struct perf_mmap_event){
5089 .vma = vma,
5090
5091
5092 .event_id = {
5093 .header = {
5094 .type = PERF_RECORD_MMAP,
5095 .misc = PERF_RECORD_MISC_USER,
5096
5097 },
5098
5099
5100 .start = vma->vm_start,
5101 .len = vma->vm_end - vma->vm_start,
5102 .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
5103 },
5104 };
5105
5106 perf_event_mmap_event(&mmap_event);
5107}
5108
5109
5110
5111
5112
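/*
 * Log a PERF_RECORD_THROTTLE / PERF_RECORD_UNTHROTTLE record for the event.
 */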
5113static void perf_log_throttle(struct perf_event *event, int enable)
5114{
5115 struct perf_output_handle handle;
5116 struct perf_sample_data sample;
5117 int ret;
5118
5119 struct {
5120 struct perf_event_header header;
5121 u64 time;
5122 u64 id;
5123 u64 stream_id;
5124 } throttle_event = {
5125 .header = {
5126 .type = PERF_RECORD_THROTTLE,
5127 .misc = 0,
5128 .size = sizeof(throttle_event),
5129 },
5130 .time = perf_clock(),
5131 .id = primary_event_id(event),
5132 .stream_id = event->id,
5133 };
5134
5135 if (enable)
5136 throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
5137
5138 perf_event_header__init_id(&throttle_event.header, &sample, event);
5139
5140 ret = perf_output_begin(&handle, event,
5141 throttle_event.header.size);
5142 if (ret)
5143 return;
5144
5145 perf_output_put(&handle, throttle_event);
5146 perf_event__output_id_sample(event, &handle, &sample);
5147 perf_output_end(&handle);
5148}
5149
5150
5151
5152
5153
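/*
 * Generic event overflow handling, sampling.
 */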
5154static int __perf_event_overflow(struct perf_event *event,
5155 int throttle, struct perf_sample_data *data,
5156 struct pt_regs *regs)
5157{
5158 int events = atomic_read(&event->event_limit);
5159 struct hw_perf_event *hwc = &event->hw;
5160 u64 seq;
5161 int ret = 0;
5162
5163
5164
5165
5166
5167 if (unlikely(!is_sampling_event(event)))
5168 return 0;
5169
5170 seq = __this_cpu_read(perf_throttled_seq);
5171 if (seq != hwc->interrupts_seq) {
5172 hwc->interrupts_seq = seq;
5173 hwc->interrupts = 1;
5174 } else {
5175 hwc->interrupts++;
5176 if (unlikely(throttle
5177 && hwc->interrupts >= max_samples_per_tick)) {
5178 __this_cpu_inc(perf_throttled_count);
5179 hwc->interrupts = MAX_INTERRUPTS;
5180 perf_log_throttle(event, 0);
5181 ret = 1;
5182 }
5183 }
5184
5185 if (event->attr.freq) {
5186 u64 now = perf_clock();
5187 s64 delta = now - hwc->freq_time_stamp;
5188
5189 hwc->freq_time_stamp = now;
5190
5191 if (delta > 0 && delta < 2*TICK_NSEC)
5192 perf_adjust_period(event, delta, hwc->last_period, true);
5193 }
5194
5195
5196
5197
5198
5199
5200 event->pending_kill = POLL_IN;
5201 if (events && atomic_dec_and_test(&event->event_limit)) {
5202 ret = 1;
5203 event->pending_kill = POLL_HUP;
5204 event->pending_disable = 1;
5205 irq_work_queue(&event->pending);
5206 }
5207
5208 if (event->overflow_handler)
5209 event->overflow_handler(event, data, regs);
5210 else
5211 perf_event_output(event, data, regs);
5212
5213 if (event->fasync && event->pending_kill) {
5214 event->pending_wakeup = 1;
5215 irq_work_queue(&event->pending);
5216 }
5217
5218 return ret;
5219}
5220
5221int perf_event_overflow(struct perf_event *event,
5222 struct perf_sample_data *data,
5223 struct pt_regs *regs)
5224{
5225 return __perf_event_overflow(event, 1, data, regs);
5226}
5227
5228
5229
5230
5231
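/*
 * Generic software event infrastructure.
 */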
5232struct swevent_htable {
5233 struct swevent_hlist *swevent_hlist;
5234 struct mutex hlist_mutex;
5235 int hlist_refcount;
5236
5237
5238 int recursion[PERF_NR_CONTEXTS];
5239};
5240
5241static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
5242
5243
5244
5245
5246
5247
5248
5249
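/*
 * hwc->period_left is kept negative between overflows and climbs towards
 * zero as software events are counted; once it turns non-negative, at
 * least one sample period has elapsed.  This helper returns how many whole
 * periods passed and rewinds period_left, using a cmpxchg loop so a racing
 * update (e.g. from NMI context) is not lost.
 */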
5250u64 perf_swevent_set_period(struct perf_event *event)
5251{
5252 struct hw_perf_event *hwc = &event->hw;
5253 u64 period = hwc->last_period;
5254 u64 nr, offset;
5255 s64 old, val;
5256
5257 hwc->last_period = hwc->sample_period;
5258
5259again:
5260 old = val = local64_read(&hwc->period_left);
5261 if (val < 0)
5262 return 0;
5263
5264 nr = div64_u64(period + val, period);
5265 offset = nr * period;
5266 val -= offset;
5267 if (local64_cmpxchg(&hwc->period_left, old, val) != old)
5268 goto again;
5269
5270 return nr;
5271}
5272
5273static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
5274 struct perf_sample_data *data,
5275 struct pt_regs *regs)
5276{
5277 struct hw_perf_event *hwc = &event->hw;
5278 int throttle = 0;
5279
5280 if (!overflow)
5281 overflow = perf_swevent_set_period(event);
5282
5283 if (hwc->interrupts == MAX_INTERRUPTS)
5284 return;
5285
5286 for (; overflow; overflow--) {
5287 if (__perf_event_overflow(event, throttle,
5288 data, regs)) {
5289
5290
5291
5292
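			/*
			 * We inhibit the overflow from happening when
			 * hwc->interrupts == MAX_INTERRUPTS.
			 */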
5293 break;
5294 }
5295 throttle = 1;
5296 }
5297}
5298
5299static void perf_swevent_event(struct perf_event *event, u64 nr,
5300 struct perf_sample_data *data,
5301 struct pt_regs *regs)
5302{
5303 struct hw_perf_event *hwc = &event->hw;
5304
5305 local64_add(nr, &event->count);
5306
5307 if (!regs)
5308 return;
5309
5310 if (!is_sampling_event(event))
5311 return;
5312
5313 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
5314 data->period = nr;
5315 return perf_swevent_overflow(event, 1, data, regs);
5316 } else
5317 data->period = event->hw.last_period;
5318
5319 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
5320 return perf_swevent_overflow(event, 1, data, regs);
5321
5322 if (local64_add_negative(nr, &hwc->period_left))
5323 return;
5324
5325 perf_swevent_overflow(event, 0, data, regs);
5326}
5327
5328static int perf_exclude_event(struct perf_event *event,
5329 struct pt_regs *regs)
5330{
5331 if (event->hw.state & PERF_HES_STOPPED)
5332 return 1;
5333
5334 if (regs) {
5335 if (event->attr.exclude_user && user_mode(regs))
5336 return 1;
5337
5338 if (event->attr.exclude_kernel && !user_mode(regs))
5339 return 1;
5340 }
5341
5342 return 0;
5343}
5344
5345static int perf_swevent_match(struct perf_event *event,
5346 enum perf_type_id type,
5347 u32 event_id,
5348 struct perf_sample_data *data,
5349 struct pt_regs *regs)
5350{
5351 if (event->attr.type != type)
5352 return 0;
5353
5354 if (event->attr.config != event_id)
5355 return 0;
5356
5357 if (perf_exclude_event(event, regs))
5358 return 0;
5359
5360 return 1;
5361}
5362
5363static inline u64 swevent_hash(u64 type, u32 event_id)
5364{
5365 u64 val = event_id | (type << 32);
5366
5367 return hash_64(val, SWEVENT_HLIST_BITS);
5368}
5369
5370static inline struct hlist_head *
5371__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
5372{
5373 u64 hash = swevent_hash(type, event_id);
5374
5375 return &hlist->heads[hash];
5376}
5377
5378
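/* For the read side: events when they trigger */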
5379static inline struct hlist_head *
5380find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
5381{
5382 struct swevent_hlist *hlist;
5383
5384 hlist = rcu_dereference(swhash->swevent_hlist);
5385 if (!hlist)
5386 return NULL;
5387
5388 return __find_swevent_head(hlist, type, event_id);
5389}
5390
5391
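/* For the event head insertion and removal in the hlist */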
5392static inline struct hlist_head *
5393find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
5394{
5395 struct swevent_hlist *hlist;
5396 u32 event_id = event->attr.config;
5397 u64 type = event->attr.type;
5398
5399
5400
5401
5402
5403
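	/*
	 * Event scheduling is always serialized against hlist allocation
	 * and release. Which makes the protected version suitable here.
	 * The context lock guarantees that.
	 */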
5404 hlist = rcu_dereference_protected(swhash->swevent_hlist,
5405 lockdep_is_held(&event->ctx->lock));
5406 if (!hlist)
5407 return NULL;
5408
5409 return __find_swevent_head(hlist, type, event_id);
5410}
5411
5412static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
5413 u64 nr,
5414 struct perf_sample_data *data,
5415 struct pt_regs *regs)
5416{
5417 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
5418 struct perf_event *event;
5419 struct hlist_head *head;
5420
5421 rcu_read_lock();
5422 head = find_swevent_head_rcu(swhash, type, event_id);
5423 if (!head)
5424 goto end;
5425
5426 hlist_for_each_entry_rcu(event, head, hlist_entry) {
5427 if (perf_swevent_match(event, type, event_id, data, regs))
5428 perf_swevent_event(event, nr, data, regs);
5429 }
5430end:
5431 rcu_read_unlock();
5432}
5433
5434int perf_swevent_get_recursion_context(void)
5435{
5436 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
5437
5438 return get_recursion_context(swhash->recursion);
5439}
5440EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
5441
5442inline void perf_swevent_put_recursion_context(int rctx)
5443{
5444 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
5445
5446 put_recursion_context(swhash->recursion, rctx);
5447}
5448
5449void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
5450{
5451 struct perf_sample_data data;
5452 int rctx;
5453
5454 preempt_disable_notrace();
5455 rctx = perf_swevent_get_recursion_context();
5456 if (rctx < 0)
5457 return;
5458
5459 perf_sample_data_init(&data, addr, 0);
5460
5461 do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
5462
5463 perf_swevent_put_recursion_context(rctx);
5464 preempt_enable_notrace();
5465}
5466
5467static void perf_swevent_read(struct perf_event *event)
5468{
5469}
5470
5471static int perf_swevent_add(struct perf_event *event, int flags)
5472{
5473 struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
5474 struct hw_perf_event *hwc = &event->hw;
5475 struct hlist_head *head;
5476
5477 if (is_sampling_event(event)) {
5478 hwc->last_period = hwc->sample_period;
5479 perf_swevent_set_period(event);
5480 }
5481
5482 hwc->state = !(flags & PERF_EF_START);
5483
5484 head = find_swevent_head(swhash, event);
5485 if (WARN_ON_ONCE(!head))
5486 return -EINVAL;
5487
5488 hlist_add_head_rcu(&event->hlist_entry, head);
5489
5490 return 0;
5491}
5492
5493static void perf_swevent_del(struct perf_event *event, int flags)
5494{
5495 hlist_del_rcu(&event->hlist_entry);
5496}
5497
5498static void perf_swevent_start(struct perf_event *event, int flags)
5499{
5500 event->hw.state = 0;
5501}
5502
5503static void perf_swevent_stop(struct perf_event *event, int flags)
5504{
5505 event->hw.state = PERF_HES_STOPPED;
5506}
5507
5508
5509static inline struct swevent_hlist *
5510swevent_hlist_deref(struct swevent_htable *swhash)
5511{
5512 return rcu_dereference_protected(swhash->swevent_hlist,
5513 lockdep_is_held(&swhash->hlist_mutex));
5514}
5515
5516static void swevent_hlist_release(struct swevent_htable *swhash)
5517{
5518 struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
5519
5520 if (!hlist)
5521 return;
5522
5523 rcu_assign_pointer(swhash->swevent_hlist, NULL);
5524 kfree_rcu(hlist, rcu_head);
5525}
5526
5527static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
5528{
5529 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
5530
5531 mutex_lock(&swhash->hlist_mutex);
5532
5533 if (!--swhash->hlist_refcount)
5534 swevent_hlist_release(swhash);
5535
5536 mutex_unlock(&swhash->hlist_mutex);
5537}
5538
5539static void swevent_hlist_put(struct perf_event *event)
5540{
5541 int cpu;
5542
5543 if (event->cpu != -1) {
5544 swevent_hlist_put_cpu(event, event->cpu);
5545 return;
5546 }
5547
5548 for_each_possible_cpu(cpu)
5549 swevent_hlist_put_cpu(event, cpu);
5550}
5551
5552static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
5553{
5554 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
5555 int err = 0;
5556
5557 mutex_lock(&swhash->hlist_mutex);
5558
5559 if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
5560 struct swevent_hlist *hlist;
5561
5562 hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
5563 if (!hlist) {
5564 err = -ENOMEM;
5565 goto exit;
5566 }
5567 rcu_assign_pointer(swhash->swevent_hlist, hlist);
5568 }
5569 swhash->hlist_refcount++;
5570exit:
5571 mutex_unlock(&swhash->hlist_mutex);
5572
5573 return err;
5574}
5575
5576static int swevent_hlist_get(struct perf_event *event)
5577{
5578 int err;
5579 int cpu, failed_cpu;
5580
5581 if (event->cpu != -1)
5582 return swevent_hlist_get_cpu(event, event->cpu);
5583
5584 get_online_cpus();
5585 for_each_possible_cpu(cpu) {
5586 err = swevent_hlist_get_cpu(event, cpu);
5587 if (err) {
5588 failed_cpu = cpu;
5589 goto fail;
5590 }
5591 }
5592 put_online_cpus();
5593
5594 return 0;
5595fail:
5596 for_each_possible_cpu(cpu) {
5597 if (cpu == failed_cpu)
5598 break;
5599 swevent_hlist_put_cpu(event, cpu);
5600 }
5601
5602 put_online_cpus();
5603 return err;
5604}
5605
5606struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
5607
5608static void sw_perf_event_destroy(struct perf_event *event)
5609{
5610 u64 event_id = event->attr.config;
5611
5612 WARN_ON(event->parent);
5613
5614 static_key_slow_dec(&perf_swevent_enabled[event_id]);
5615 swevent_hlist_put(event);
5616}
5617
5618static int perf_swevent_init(struct perf_event *event)
5619{
5620 u64 event_id = event->attr.config;
5621
5622 if (event->attr.type != PERF_TYPE_SOFTWARE)
5623 return -ENOENT;
5624
5625
5626
5627
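	/*
	 * no branch sampling for software events
	 */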
5628 if (has_branch_stack(event))
5629 return -EOPNOTSUPP;
5630
5631 switch (event_id) {
5632 case PERF_COUNT_SW_CPU_CLOCK:
5633 case PERF_COUNT_SW_TASK_CLOCK:
5634 return -ENOENT;
5635
5636 default:
5637 break;
5638 }
5639
5640 if (event_id >= PERF_COUNT_SW_MAX)
5641 return -ENOENT;
5642
5643 if (!event->parent) {
5644 int err;
5645
5646 err = swevent_hlist_get(event);
5647 if (err)
5648 return err;
5649
5650 static_key_slow_inc(&perf_swevent_enabled[event_id]);
5651 event->destroy = sw_perf_event_destroy;
5652 }
5653
5654 return 0;
5655}
5656
5657static int perf_swevent_event_idx(struct perf_event *event)
5658{
5659 return 0;
5660}
5661
5662static struct pmu perf_swevent = {
5663 .task_ctx_nr = perf_sw_context,
5664
5665 .event_init = perf_swevent_init,
5666 .add = perf_swevent_add,
5667 .del = perf_swevent_del,
5668 .start = perf_swevent_start,
5669 .stop = perf_swevent_stop,
5670 .read = perf_swevent_read,
5671
5672 .event_idx = perf_swevent_event_idx,
5673};
5674
5675#ifdef CONFIG_EVENT_TRACING
5676
5677static int perf_tp_filter_match(struct perf_event *event,
5678 struct perf_sample_data *data)
5679{
5680 void *record = data->raw->data;
5681
5682 if (likely(!event->filter) || filter_match_preds(event->filter, record))
5683 return 1;
5684 return 0;
5685}
5686
5687static int perf_tp_event_match(struct perf_event *event,
5688 struct perf_sample_data *data,
5689 struct pt_regs *regs)
5690{
5691 if (event->hw.state & PERF_HES_STOPPED)
5692 return 0;
5693
5694
5695
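	/*
	 * All tracepoints are from kernel-space.
	 */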
5696 if (event->attr.exclude_kernel)
5697 return 0;
5698
5699 if (!perf_tp_filter_match(event, data))
5700 return 0;
5701
5702 return 1;
5703}
5704
5705void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
5706 struct pt_regs *regs, struct hlist_head *head, int rctx,
5707 struct task_struct *task)
5708{
5709 struct perf_sample_data data;
5710 struct perf_event *event;
5711
5712 struct perf_raw_record raw = {
5713 .size = entry_size,
5714 .data = record,
5715 };
5716
5717 perf_sample_data_init(&data, addr, 0);
5718 data.raw = &raw;
5719
5720 hlist_for_each_entry_rcu(event, head, hlist_entry) {
5721 if (perf_tp_event_match(event, &data, regs))
5722 perf_swevent_event(event, count, &data, regs);
5723 }
5724
5725
5726
5727
5728
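	/*
	 * If we got specified a target task, also iterate its context and
	 * deliver this event there too.
	 */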
5729 if (task && task != current) {
5730 struct perf_event_context *ctx;
5731 struct trace_entry *entry = record;
5732
5733 rcu_read_lock();
5734 ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]);
5735 if (!ctx)
5736 goto unlock;
5737
5738 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
5739 if (event->attr.type != PERF_TYPE_TRACEPOINT)
5740 continue;
5741 if (event->attr.config != entry->type)
5742 continue;
5743 if (perf_tp_event_match(event, &data, regs))
5744 perf_swevent_event(event, count, &data, regs);
5745 }
5746unlock:
5747 rcu_read_unlock();
5748 }
5749
5750 perf_swevent_put_recursion_context(rctx);
5751}
5752EXPORT_SYMBOL_GPL(perf_tp_event);
5753
5754static void tp_perf_event_destroy(struct perf_event *event)
5755{
5756 perf_trace_destroy(event);
5757}
5758
5759static int perf_tp_event_init(struct perf_event *event)
5760{
5761 int err;
5762
5763 if (event->attr.type != PERF_TYPE_TRACEPOINT)
5764 return -ENOENT;
5765
5766
5767
5768
5769 if (has_branch_stack(event))
5770 return -EOPNOTSUPP;
5771
5772 err = perf_trace_init(event);
5773 if (err)
5774 return err;
5775
5776 event->destroy = tp_perf_event_destroy;
5777
5778 return 0;
5779}
5780
5781static struct pmu perf_tracepoint = {
5782 .task_ctx_nr = perf_sw_context,
5783
5784 .event_init = perf_tp_event_init,
5785 .add = perf_trace_add,
5786 .del = perf_trace_del,
5787 .start = perf_swevent_start,
5788 .stop = perf_swevent_stop,
5789 .read = perf_swevent_read,
5790
5791 .event_idx = perf_swevent_event_idx,
5792};
5793
5794static inline void perf_tp_register(void)
5795{
5796 perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
5797}
5798
5799static int perf_event_set_filter(struct perf_event *event, void __user *arg)
5800{
5801 char *filter_str;
5802 int ret;
5803
5804 if (event->attr.type != PERF_TYPE_TRACEPOINT)
5805 return -EINVAL;
5806
5807 filter_str = strndup_user(arg, PAGE_SIZE);
5808 if (IS_ERR(filter_str))
5809 return PTR_ERR(filter_str);
5810
5811 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
5812
5813 kfree(filter_str);
5814 return ret;
5815}
5816
5817static void perf_event_free_filter(struct perf_event *event)
5818{
5819 ftrace_profile_free_filter(event);
5820}
5821
5822#else
5823
5824static inline void perf_tp_register(void)
5825{
5826}
5827
5828static int perf_event_set_filter(struct perf_event *event, void __user *arg)
5829{
5830 return -ENOENT;
5831}
5832
5833static void perf_event_free_filter(struct perf_event *event)
5834{
5835}
5836
5837#endif
5838
5839#ifdef CONFIG_HAVE_HW_BREAKPOINT
5840void perf_bp_event(struct perf_event *bp, void *data)
5841{
5842 struct perf_sample_data sample;
5843 struct pt_regs *regs = data;
5844
5845 perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
5846
5847 if (!bp->hw.state && !perf_exclude_event(bp, regs))
5848 perf_swevent_event(bp, 1, &sample, regs);
5849}
5850#endif
5851
5852
5853
5854
5855
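/*
 * hrtimer based swevent callback
 */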
5856static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
5857{
5858 enum hrtimer_restart ret = HRTIMER_RESTART;
5859 struct perf_sample_data data;
5860 struct pt_regs *regs;
5861 struct perf_event *event;
5862 u64 period;
5863
5864 event = container_of(hrtimer, struct perf_event, hw.hrtimer);
5865
5866 if (event->state != PERF_EVENT_STATE_ACTIVE)
5867 return HRTIMER_NORESTART;
5868
5869 event->pmu->read(event);
5870
5871 perf_sample_data_init(&data, 0, event->hw.last_period);
5872 regs = get_irq_regs();
5873
5874 if (regs && !perf_exclude_event(event, regs)) {
5875 if (!(event->attr.exclude_idle && is_idle_task(current)))
5876 if (__perf_event_overflow(event, 1, &data, regs))
5877 ret = HRTIMER_NORESTART;
5878 }
5879
5880 period = max_t(u64, 10000, event->hw.sample_period);
5881 hrtimer_forward_now(hrtimer, ns_to_ktime(period));
5882
5883 return ret;
5884}
5885
5886static void perf_swevent_start_hrtimer(struct perf_event *event)
5887{
5888 struct hw_perf_event *hwc = &event->hw;
5889 s64 period;
5890
5891 if (!is_sampling_event(event))
5892 return;
5893
5894 period = local64_read(&hwc->period_left);
5895 if (period) {
5896 if (period < 0)
5897 period = 10000;
5898
5899 local64_set(&hwc->period_left, 0);
5900 } else {
5901 period = max_t(u64, 10000, hwc->sample_period);
5902 }
5903 __hrtimer_start_range_ns(&hwc->hrtimer,
5904 ns_to_ktime(period), 0,
5905 HRTIMER_MODE_REL_PINNED, 0);
5906}
5907
5908static void perf_swevent_cancel_hrtimer(struct perf_event *event)
5909{
5910 struct hw_perf_event *hwc = &event->hw;
5911
5912 if (is_sampling_event(event)) {
5913 ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
5914 local64_set(&hwc->period_left, ktime_to_ns(remaining));
5915
5916 hrtimer_cancel(&hwc->hrtimer);
5917 }
5918}
5919
5920static void perf_swevent_init_hrtimer(struct perf_event *event)
5921{
5922 struct hw_perf_event *hwc = &event->hw;
5923
5924 if (!is_sampling_event(event))
5925 return;
5926
5927 hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
5928 hwc->hrtimer.function = perf_swevent_hrtimer;
5929
5930
5931
5932
5933
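	/*
	 * Since hrtimers have a fixed rate, we can do a static freq->period
	 * mapping and avoid the whole period adjust feedback stuff.
	 */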
5934 if (event->attr.freq) {
5935 long freq = event->attr.sample_freq;
5936
5937 event->attr.sample_period = NSEC_PER_SEC / freq;
5938 hwc->sample_period = event->attr.sample_period;
5939 local64_set(&hwc->period_left, hwc->sample_period);
5940 hwc->last_period = hwc->sample_period;
5941 event->attr.freq = 0;
5942 }
5943}
5944
5945
5946
5947
5948
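/*
 * Software event: cpu wall time clock
 */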
5949static void cpu_clock_event_update(struct perf_event *event)
5950{
5951 s64 prev;
5952 u64 now;
5953
5954 now = local_clock();
5955 prev = local64_xchg(&event->hw.prev_count, now);
5956 local64_add(now - prev, &event->count);
5957}
5958
5959static void cpu_clock_event_start(struct perf_event *event, int flags)
5960{
5961 local64_set(&event->hw.prev_count, local_clock());
5962 perf_swevent_start_hrtimer(event);
5963}
5964
5965static void cpu_clock_event_stop(struct perf_event *event, int flags)
5966{
5967 perf_swevent_cancel_hrtimer(event);
5968 cpu_clock_event_update(event);
5969}
5970
5971static int cpu_clock_event_add(struct perf_event *event, int flags)
5972{
5973 if (flags & PERF_EF_START)
5974 cpu_clock_event_start(event, flags);
5975
5976 return 0;
5977}
5978
5979static void cpu_clock_event_del(struct perf_event *event, int flags)
5980{
5981 cpu_clock_event_stop(event, flags);
5982}
5983
5984static void cpu_clock_event_read(struct perf_event *event)
5985{
5986 cpu_clock_event_update(event);
5987}
5988
5989static int cpu_clock_event_init(struct perf_event *event)
5990{
5991 if (event->attr.type != PERF_TYPE_SOFTWARE)
5992 return -ENOENT;
5993
5994 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
5995 return -ENOENT;
5996
5997
5998
5999
6000 if (has_branch_stack(event))
6001 return -EOPNOTSUPP;
6002
6003 perf_swevent_init_hrtimer(event);
6004
6005 return 0;
6006}
6007
6008static struct pmu perf_cpu_clock = {
6009 .task_ctx_nr = perf_sw_context,
6010
6011 .event_init = cpu_clock_event_init,
6012 .add = cpu_clock_event_add,
6013 .del = cpu_clock_event_del,
6014 .start = cpu_clock_event_start,
6015 .stop = cpu_clock_event_stop,
6016 .read = cpu_clock_event_read,
6017
6018 .event_idx = perf_swevent_event_idx,
6019};
6020
6021
6022
6023
6024
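/*
 * Software event: task time clock
 */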
6025static void task_clock_event_update(struct perf_event *event, u64 now)
6026{
6027 u64 prev;
6028 s64 delta;
6029
6030 prev = local64_xchg(&event->hw.prev_count, now);
6031 delta = now - prev;
6032 local64_add(delta, &event->count);
6033}
6034
6035static void task_clock_event_start(struct perf_event *event, int flags)
6036{
6037 local64_set(&event->hw.prev_count, event->ctx->time);
6038 perf_swevent_start_hrtimer(event);
6039}
6040
6041static void task_clock_event_stop(struct perf_event *event, int flags)
6042{
6043 perf_swevent_cancel_hrtimer(event);
6044 task_clock_event_update(event, event->ctx->time);
6045}
6046
6047static int task_clock_event_add(struct perf_event *event, int flags)
6048{
6049 if (flags & PERF_EF_START)
6050 task_clock_event_start(event, flags);
6051
6052 return 0;
6053}
6054
6055static void task_clock_event_del(struct perf_event *event, int flags)
6056{
6057 task_clock_event_stop(event, PERF_EF_UPDATE);
6058}
6059
6060static void task_clock_event_read(struct perf_event *event)
6061{
6062 u64 now = perf_clock();
6063 u64 delta = now - event->ctx->timestamp;
6064 u64 time = event->ctx->time + delta;
6065
6066 task_clock_event_update(event, time);
6067}
6068
6069static int task_clock_event_init(struct perf_event *event)
6070{
6071 if (event->attr.type != PERF_TYPE_SOFTWARE)
6072 return -ENOENT;
6073
6074 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
6075 return -ENOENT;
6076
6077
6078
6079
6080 if (has_branch_stack(event))
6081 return -EOPNOTSUPP;
6082
6083 perf_swevent_init_hrtimer(event);
6084
6085 return 0;
6086}
6087
6088static struct pmu perf_task_clock = {
6089 .task_ctx_nr = perf_sw_context,
6090
6091 .event_init = task_clock_event_init,
6092 .add = task_clock_event_add,
6093 .del = task_clock_event_del,
6094 .start = task_clock_event_start,
6095 .stop = task_clock_event_stop,
6096 .read = task_clock_event_read,
6097
6098 .event_idx = perf_swevent_event_idx,
6099};
6100
6101static void perf_pmu_nop_void(struct pmu *pmu)
6102{
6103}
6104
6105static int perf_pmu_nop_int(struct pmu *pmu)
6106{
6107 return 0;
6108}
6109
6110static void perf_pmu_start_txn(struct pmu *pmu)
6111{
6112 perf_pmu_disable(pmu);
6113}
6114
6115static int perf_pmu_commit_txn(struct pmu *pmu)
6116{
6117 perf_pmu_enable(pmu);
6118 return 0;
6119}
6120
6121static void perf_pmu_cancel_txn(struct pmu *pmu)
6122{
6123 perf_pmu_enable(pmu);
6124}
6125
6126static int perf_event_idx_default(struct perf_event *event)
6127{
6128 return event->hw.idx + 1;
6129}
6130
6131
6132
6133
6134
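/*
 * Ensures all contexts with the same task_ctx_nr have the same
 * pmu_cpu_context too.
 */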
6135static void *find_pmu_context(int ctxn)
6136{
6137 struct pmu *pmu;
6138
6139 if (ctxn < 0)
6140 return NULL;
6141
6142 list_for_each_entry(pmu, &pmus, entry) {
6143 if (pmu->task_ctx_nr == ctxn)
6144 return pmu->pmu_cpu_context;
6145 }
6146
6147 return NULL;
6148}
6149
6150static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
6151{
6152 int cpu;
6153
6154 for_each_possible_cpu(cpu) {
6155 struct perf_cpu_context *cpuctx;
6156
6157 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
6158
6159 if (cpuctx->unique_pmu == old_pmu)
6160 cpuctx->unique_pmu = pmu;
6161 }
6162}
6163
6164static void free_pmu_context(struct pmu *pmu)
6165{
6166 struct pmu *i;
6167
6168 mutex_lock(&pmus_lock);
6169
6170
6171
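	/*
	 * Only free the cpu context once no other pmu is still sharing it.
	 */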
6172 list_for_each_entry(i, &pmus, entry) {
6173 if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
6174 update_pmu_context(i, pmu);
6175 goto out;
6176 }
6177 }
6178
6179 free_percpu(pmu->pmu_cpu_context);
6180out:
6181 mutex_unlock(&pmus_lock);
6182}
6183static struct idr pmu_idr;
6184
6185static ssize_t
6186type_show(struct device *dev, struct device_attribute *attr, char *page)
6187{
6188 struct pmu *pmu = dev_get_drvdata(dev);
6189
6190 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
6191}
6192
6193static ssize_t
6194perf_event_mux_interval_ms_show(struct device *dev,
6195 struct device_attribute *attr,
6196 char *page)
6197{
6198 struct pmu *pmu = dev_get_drvdata(dev);
6199
6200 return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms);
6201}
6202
6203static ssize_t
6204perf_event_mux_interval_ms_store(struct device *dev,
6205 struct device_attribute *attr,
6206 const char *buf, size_t count)
6207{
6208 struct pmu *pmu = dev_get_drvdata(dev);
6209 int timer, cpu, ret;
6210
6211 ret = kstrtoint(buf, 0, &timer);
6212 if (ret)
6213 return ret;
6214
6215 if (timer < 1)
6216 return -EINVAL;
6217
6218
6219 if (timer == pmu->hrtimer_interval_ms)
6220 return count;
6221
6222 pmu->hrtimer_interval_ms = timer;
6223
6224
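	/* update all cpuctx for this PMU */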
6225 for_each_possible_cpu(cpu) {
6226 struct perf_cpu_context *cpuctx;
6227 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
6228 cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
6229
6230 if (hrtimer_active(&cpuctx->hrtimer))
6231 hrtimer_forward_now(&cpuctx->hrtimer, cpuctx->hrtimer_interval);
6232 }
6233
6234 return count;
6235}
6236
6237static struct device_attribute pmu_dev_attrs[] = {
6238 __ATTR_RO(type),
6239 __ATTR_RW(perf_event_mux_interval_ms),
6240 __ATTR_NULL,
6241};
6242
6243static int pmu_bus_running;
6244static struct bus_type pmu_bus = {
6245 .name = "event_source",
6246 .dev_attrs = pmu_dev_attrs,
6247};
6248
6249static void pmu_dev_release(struct device *dev)
6250{
6251 kfree(dev);
6252}
6253
6254static int pmu_dev_alloc(struct pmu *pmu)
6255{
6256 int ret = -ENOMEM;
6257
6258 pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
6259 if (!pmu->dev)
6260 goto out;
6261
6262 pmu->dev->groups = pmu->attr_groups;
6263 device_initialize(pmu->dev);
6264 ret = dev_set_name(pmu->dev, "%s", pmu->name);
6265 if (ret)
6266 goto free_dev;
6267
6268 dev_set_drvdata(pmu->dev, pmu);
6269 pmu->dev->bus = &pmu_bus;
6270 pmu->dev->release = pmu_dev_release;
6271 ret = device_add(pmu->dev);
6272 if (ret)
6273 goto free_dev;
6274
6275out:
6276 return ret;
6277
6278free_dev:
6279 put_device(pmu->dev);
6280 goto out;
6281}
6282
6283static struct lock_class_key cpuctx_mutex;
6284static struct lock_class_key cpuctx_lock;
6285
6286int perf_pmu_register(struct pmu *pmu, const char *name, int type)
6287{
6288 int cpu, ret;
6289
6290 mutex_lock(&pmus_lock);
6291 ret = -ENOMEM;
6292 pmu->pmu_disable_count = alloc_percpu(int);
6293 if (!pmu->pmu_disable_count)
6294 goto unlock;
6295
6296 pmu->type = -1;
6297 if (!name)
6298 goto skip_type;
6299 pmu->name = name;
6300
6301 if (type < 0) {
6302 type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL);
6303 if (type < 0) {
6304 ret = type;
6305 goto free_pdc;
6306 }
6307 }
6308 pmu->type = type;
6309
6310 if (pmu_bus_running) {
6311 ret = pmu_dev_alloc(pmu);
6312 if (ret)
6313 goto free_idr;
6314 }
6315
6316skip_type:
6317 pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
6318 if (pmu->pmu_cpu_context)
6319 goto got_cpu_context;
6320
6321 ret = -ENOMEM;
6322 pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
6323 if (!pmu->pmu_cpu_context)
6324 goto free_dev;
6325
6326 for_each_possible_cpu(cpu) {
6327 struct perf_cpu_context *cpuctx;
6328
6329 cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
6330 __perf_event_init_context(&cpuctx->ctx);
6331 lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
6332 lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
6333 cpuctx->ctx.type = cpu_context;
6334 cpuctx->ctx.pmu = pmu;
6335
6336 __perf_cpu_hrtimer_init(cpuctx, cpu);
6337
6338 INIT_LIST_HEAD(&cpuctx->rotation_list);
6339 cpuctx->unique_pmu = pmu;
6340 }
6341
6342got_cpu_context:
6343 if (!pmu->start_txn) {
6344 if (pmu->pmu_enable) {
6345
6346
6347
6348
6349
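			/*
			 * If we have pmu_enable/pmu_disable calls, install
			 * transaction stubs that use that to try and batch
			 * hardware accesses.
			 */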
6350 pmu->start_txn = perf_pmu_start_txn;
6351 pmu->commit_txn = perf_pmu_commit_txn;
6352 pmu->cancel_txn = perf_pmu_cancel_txn;
6353 } else {
6354 pmu->start_txn = perf_pmu_nop_void;
6355 pmu->commit_txn = perf_pmu_nop_int;
6356 pmu->cancel_txn = perf_pmu_nop_void;
6357 }
6358 }
6359
6360 if (!pmu->pmu_enable) {
6361 pmu->pmu_enable = perf_pmu_nop_void;
6362 pmu->pmu_disable = perf_pmu_nop_void;
6363 }
6364
6365 if (!pmu->event_idx)
6366 pmu->event_idx = perf_event_idx_default;
6367
6368 list_add_rcu(&pmu->entry, &pmus);
6369 ret = 0;
6370unlock:
6371 mutex_unlock(&pmus_lock);
6372
6373 return ret;
6374
6375free_dev:
6376 device_del(pmu->dev);
6377 put_device(pmu->dev);
6378
6379free_idr:
6380 if (pmu->type >= PERF_TYPE_MAX)
6381 idr_remove(&pmu_idr, pmu->type);
6382
6383free_pdc:
6384 free_percpu(pmu->pmu_disable_count);
6385 goto unlock;
6386}
6387
6388void perf_pmu_unregister(struct pmu *pmu)
6389{
6390 mutex_lock(&pmus_lock);
6391 list_del_rcu(&pmu->entry);
6392 mutex_unlock(&pmus_lock);
6393
6394
6395
6396
6397
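	/*
	 * We dereference the pmu list under both SRCU and regular RCU, so
	 * synchronize against both of those.
	 */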
6398 synchronize_srcu(&pmus_srcu);
6399 synchronize_rcu();
6400
6401 free_percpu(pmu->pmu_disable_count);
6402 if (pmu->type >= PERF_TYPE_MAX)
6403 idr_remove(&pmu_idr, pmu->type);
6404 device_del(pmu->dev);
6405 put_device(pmu->dev);
6406 free_pmu_context(pmu);
6407}
6408
6409struct pmu *perf_init_event(struct perf_event *event)
6410{
6411 struct pmu *pmu = NULL;
6412 int idx;
6413 int ret;
6414
6415 idx = srcu_read_lock(&pmus_srcu);
6416
6417 rcu_read_lock();
6418 pmu = idr_find(&pmu_idr, event->attr.type);
6419 rcu_read_unlock();
6420 if (pmu) {
6421 event->pmu = pmu;
6422 ret = pmu->event_init(event);
6423 if (ret)
6424 pmu = ERR_PTR(ret);
6425 goto unlock;
6426 }
6427
6428 list_for_each_entry_rcu(pmu, &pmus, entry) {
6429 event->pmu = pmu;
6430 ret = pmu->event_init(event);
6431 if (!ret)
6432 goto unlock;
6433
6434 if (ret != -ENOENT) {
6435 pmu = ERR_PTR(ret);
6436 goto unlock;
6437 }
6438 }
6439 pmu = ERR_PTR(-ENOENT);
6440unlock:
6441 srcu_read_unlock(&pmus_srcu, idx);
6442
6443 return pmu;
6444}
6445
6446
6447
6448
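/*
 * Allocate and initialize an event structure
 */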
6449static struct perf_event *
6450perf_event_alloc(struct perf_event_attr *attr, int cpu,
6451 struct task_struct *task,
6452 struct perf_event *group_leader,
6453 struct perf_event *parent_event,
6454 perf_overflow_handler_t overflow_handler,
6455 void *context)
6456{
6457 struct pmu *pmu;
6458 struct perf_event *event;
6459 struct hw_perf_event *hwc;
6460 long err;
6461
6462 if ((unsigned)cpu >= nr_cpu_ids) {
6463 if (!task || cpu != -1)
6464 return ERR_PTR(-EINVAL);
6465 }
6466
6467 event = kzalloc(sizeof(*event), GFP_KERNEL);
6468 if (!event)
6469 return ERR_PTR(-ENOMEM);
6470
6471
6472
6473
6474
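	/*
	 * Single events are their own group leaders, with an
	 * empty sibling list:
	 */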
6475 if (!group_leader)
6476 group_leader = event;
6477
6478 mutex_init(&event->child_mutex);
6479 INIT_LIST_HEAD(&event->child_list);
6480
6481 INIT_LIST_HEAD(&event->group_entry);
6482 INIT_LIST_HEAD(&event->event_entry);
6483 INIT_LIST_HEAD(&event->sibling_list);
6484 INIT_LIST_HEAD(&event->rb_entry);
6485
6486 init_waitqueue_head(&event->waitq);
6487 init_irq_work(&event->pending, perf_pending_event);
6488
6489 mutex_init(&event->mmap_mutex);
6490
6491 atomic_long_set(&event->refcount, 1);
6492 event->cpu = cpu;
6493 event->attr = *attr;
6494 event->group_leader = group_leader;
6495 event->pmu = NULL;
6496 event->oncpu = -1;
6497
6498 event->parent = parent_event;
6499
6500 event->ns = get_pid_ns(task_active_pid_ns(current));
6501 event->id = atomic64_inc_return(&perf_event_id);
6502
6503 event->state = PERF_EVENT_STATE_INACTIVE;
6504
6505 if (task) {
6506 event->attach_state = PERF_ATTACH_TASK;
6507
6508 if (attr->type == PERF_TYPE_TRACEPOINT)
6509 event->hw.tp_target = task;
6510#ifdef CONFIG_HAVE_HW_BREAKPOINT
6511
6512
6513
6514 else if (attr->type == PERF_TYPE_BREAKPOINT)
6515 event->hw.bp_target = task;
6516#endif
6517 }
6518
6519 if (!overflow_handler && parent_event) {
6520 overflow_handler = parent_event->overflow_handler;
6521 context = parent_event->overflow_handler_context;
6522 }
6523
6524 event->overflow_handler = overflow_handler;
6525 event->overflow_handler_context = context;
6526
6527 perf_event__state_init(event);
6528
6529 pmu = NULL;
6530
6531 hwc = &event->hw;
6532 hwc->sample_period = attr->sample_period;
6533 if (attr->freq && attr->sample_freq)
6534 hwc->sample_period = 1;
6535 hwc->last_period = hwc->sample_period;
6536
6537 local64_set(&hwc->period_left, hwc->sample_period);
6538
6539
6540
6541
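	/*
	 * we currently do not support PERF_FORMAT_GROUP on inherited events
	 */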
6542 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
6543 goto done;
6544
6545 pmu = perf_init_event(event);
6546
6547done:
6548 err = 0;
6549 if (!pmu)
6550 err = -EINVAL;
6551 else if (IS_ERR(pmu))
6552 err = PTR_ERR(pmu);
6553
6554 if (err) {
6555 if (event->ns)
6556 put_pid_ns(event->ns);
6557 kfree(event);
6558 return ERR_PTR(err);
6559 }
6560
6561 if (!event->parent) {
6562 if (event->attach_state & PERF_ATTACH_TASK)
6563 static_key_slow_inc(&perf_sched_events.key);
6564 if (event->attr.mmap || event->attr.mmap_data)
6565 atomic_inc(&nr_mmap_events);
6566 if (event->attr.comm)
6567 atomic_inc(&nr_comm_events);
6568 if (event->attr.task)
6569 atomic_inc(&nr_task_events);
6570 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
6571 err = get_callchain_buffers();
6572 if (err) {
6573 free_event(event);
6574 return ERR_PTR(err);
6575 }
6576 }
6577 if (has_branch_stack(event)) {
6578 static_key_slow_inc(&perf_sched_events.key);
6579 if (!(event->attach_state & PERF_ATTACH_TASK))
6580 atomic_inc(&per_cpu(perf_branch_stack_events,
6581 event->cpu));
6582 }
6583 }
6584
6585 return event;
6586}
6587
6588static int perf_copy_attr(struct perf_event_attr __user *uattr,
6589 struct perf_event_attr *attr)
6590{
6591 u32 size;
6592 int ret;
6593
6594 if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
6595 return -EFAULT;
6596
6597
6598
6599
6600 memset(attr, 0, sizeof(*attr));
6601
6602 ret = get_user(size, &uattr->size);
6603 if (ret)
6604 return ret;
6605
6606 if (size > PAGE_SIZE)
6607 goto err_size;
6608
6609 if (!size)
6610 size = PERF_ATTR_SIZE_VER0;
6611
6612 if (size < PERF_ATTR_SIZE_VER0)
6613 goto err_size;
6614
6615
6616
6617
6618
6619
6620
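	/*
	 * If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */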
6621 if (size > sizeof(*attr)) {
6622 unsigned char __user *addr;
6623 unsigned char __user *end;
6624 unsigned char val;
6625
6626 addr = (void __user *)uattr + sizeof(*attr);
6627 end = (void __user *)uattr + size;
6628
6629 for (; addr < end; addr++) {
6630 ret = get_user(val, addr);
6631 if (ret)
6632 return ret;
6633 if (val)
6634 goto err_size;
6635 }
6636 size = sizeof(*attr);
6637 }
6638
6639 ret = copy_from_user(attr, uattr, size);
6640 if (ret)
6641 return -EFAULT;
6642
6643 if (attr->__reserved_1)
6644 return -EINVAL;
6645
6646 if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
6647 return -EINVAL;
6648
6649 if (attr->read_format & ~(PERF_FORMAT_MAX-1))
6650 return -EINVAL;
6651
6652 if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) {
6653 u64 mask = attr->branch_sample_type;
6654
6655
6656 if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1))
6657 return -EINVAL;
6658
6659
6660 if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL))
6661 return -EINVAL;
6662
6663
6664 if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) {
6665
6666
6667 if (!attr->exclude_kernel)
6668 mask |= PERF_SAMPLE_BRANCH_KERNEL;
6669
6670 if (!attr->exclude_user)
6671 mask |= PERF_SAMPLE_BRANCH_USER;
6672
6673 if (!attr->exclude_hv)
6674 mask |= PERF_SAMPLE_BRANCH_HV;
6675
6676
6677
6678 attr->branch_sample_type = mask;
6679 }
6680
6681 if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM)
6682 && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
6683 return -EACCES;
6684 }
6685
6686 if (attr->sample_type & PERF_SAMPLE_REGS_USER) {
6687 ret = perf_reg_validate(attr->sample_regs_user);
6688 if (ret)
6689 return ret;
6690 }
6691
6692 if (attr->sample_type & PERF_SAMPLE_STACK_USER) {
6693 if (!arch_perf_have_user_stack_dump())
6694 return -ENOSYS;
6695
6696
6697
6698
6699
6700
6701 if (attr->sample_stack_user >= USHRT_MAX)
6702 ret = -EINVAL;
6703 else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
6704 ret = -EINVAL;
6705 }
6706
6707out:
6708 return ret;
6709
6710err_size:
6711 put_user(sizeof(*attr), &uattr->size);
6712 ret = -E2BIG;
6713 goto out;
6714}
6715
6716static int
6717perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
6718{
6719 struct ring_buffer *rb = NULL, *old_rb = NULL;
6720 int ret = -EINVAL;
6721
6722 if (!output_event)
6723 goto set;
6724
6725
6726 if (event == output_event)
6727 goto out;
6728
6729
6730
6731
6732 if (output_event->cpu != event->cpu)
6733 goto out;
6734
6735
6736
6737
6738 if (output_event->cpu == -1 && output_event->ctx != event->ctx)
6739 goto out;
6740
6741set:
6742 mutex_lock(&event->mmap_mutex);
6743
6744 if (atomic_read(&event->mmap_count))
6745 goto unlock;
6746
6747 old_rb = event->rb;
6748
6749 if (output_event) {
6750
6751 rb = ring_buffer_get(output_event);
6752 if (!rb)
6753 goto unlock;
6754 }
6755
6756 if (old_rb)
6757 ring_buffer_detach(event, old_rb);
6758
6759 if (rb)
6760 ring_buffer_attach(event, rb);
6761
6762 rcu_assign_pointer(event->rb, rb);
6763
6764 if (old_rb) {
6765 ring_buffer_put(old_rb);
6766
6767
6768
6769
6770
6771 wake_up_all(&event->waitq);
6772 }
6773
6774 ret = 0;
6775unlock:
6776 mutex_unlock(&event->mmap_mutex);
6777
6778out:
6779 return ret;
6780}
6781
6782
6783
6784
6785
6786
6787
6788
6789
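/**
 * sys_perf_event_open - open a performance event, associate it to a task/cpu
 *
 * @attr_uptr:	event_id type attributes for monitoring/sampling
 * @pid:	target pid
 * @cpu:	target cpu
 * @group_fd:	group leader event fd
 */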
6790SYSCALL_DEFINE5(perf_event_open,
6791 struct perf_event_attr __user *, attr_uptr,
6792 pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
6793{
6794 struct perf_event *group_leader = NULL, *output_event = NULL;
6795 struct perf_event *event, *sibling;
6796 struct perf_event_attr attr;
6797 struct perf_event_context *ctx;
6798 struct file *event_file = NULL;
6799 struct fd group = {NULL, 0};
6800 struct task_struct *task = NULL;
6801 struct pmu *pmu;
6802 int event_fd;
6803 int move_group = 0;
6804 int err;
6805
6806
6807 if (flags & ~PERF_FLAG_ALL)
6808 return -EINVAL;
6809
6810 err = perf_copy_attr(attr_uptr, &attr);
6811 if (err)
6812 return err;
6813
6814 if (!attr.exclude_kernel) {
6815 if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
6816 return -EACCES;
6817 }
6818
6819 if (attr.freq) {
6820 if (attr.sample_freq > sysctl_perf_event_sample_rate)
6821 return -EINVAL;
6822 }
6823
6824
6825
6826
6827
6828
6829
6830 if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
6831 return -EINVAL;
6832
6833 event_fd = get_unused_fd();
6834 if (event_fd < 0)
6835 return event_fd;
6836
6837 if (group_fd != -1) {
6838 err = perf_fget_light(group_fd, &group);
6839 if (err)
6840 goto err_fd;
6841 group_leader = group.file->private_data;
6842 if (flags & PERF_FLAG_FD_OUTPUT)
6843 output_event = group_leader;
6844 if (flags & PERF_FLAG_FD_NO_GROUP)
6845 group_leader = NULL;
6846 }
6847
6848 if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
6849 task = find_lively_task_by_vpid(pid);
6850 if (IS_ERR(task)) {
6851 err = PTR_ERR(task);
6852 goto err_group_fd;
6853 }
6854 }
6855
6856 get_online_cpus();
6857
6858 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
6859 NULL, NULL);
6860 if (IS_ERR(event)) {
6861 err = PTR_ERR(event);
6862 goto err_task;
6863 }
6864
6865 if (flags & PERF_FLAG_PID_CGROUP) {
6866 err = perf_cgroup_connect(pid, event, &attr, group_leader);
6867 if (err)
6868 goto err_alloc;
6869
6870
6871
6872
6873
6874 atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
6875 static_key_slow_inc(&perf_sched_events.key);
6876 }
6877
6878
6879
6880
6881
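	/*
	 * Special case software events and allow them to be part of
	 * any hardware group.
	 */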
6882 pmu = event->pmu;
6883
6884 if (group_leader &&
6885 (is_software_event(event) != is_software_event(group_leader))) {
6886 if (is_software_event(event)) {
6887
6888
6889
6890
6891
6892
6893
6894
6895 pmu = group_leader->pmu;
6896 } else if (is_software_event(group_leader) &&
6897 (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
6898
6899
6900
6901
6902
6903 move_group = 1;
6904 }
6905 }
6906
6907
6908
6909
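	/*
	 * Get the target context (task or percpu):
	 */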
6910 ctx = find_get_context(pmu, task, event->cpu);
6911 if (IS_ERR(ctx)) {
6912 err = PTR_ERR(ctx);
6913 goto err_alloc;
6914 }
6915
6916 if (task) {
6917 put_task_struct(task);
6918 task = NULL;
6919 }
6920
6921
6922
6923
6924 if (group_leader) {
6925 err = -EINVAL;
6926
6927
6928
6929
6930
6931 if (group_leader->group_leader != group_leader)
6932 goto err_context;
6933
6934
6935
6936
6937 if (move_group) {
6938 if (group_leader->ctx->type != ctx->type)
6939 goto err_context;
6940 } else {
6941 if (group_leader->ctx != ctx)
6942 goto err_context;
6943 }
6944
6945
6946
6947
6948 if (attr.exclusive || attr.pinned)
6949 goto err_context;
6950 }
6951
6952 if (output_event) {
6953 err = perf_event_set_output(event, output_event);
6954 if (err)
6955 goto err_context;
6956 }
6957
6958 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
6959 if (IS_ERR(event_file)) {
6960 err = PTR_ERR(event_file);
6961 goto err_context;
6962 }
6963
6964 if (move_group) {
6965 struct perf_event_context *gctx = group_leader->ctx;
6966
6967 mutex_lock(&gctx->mutex);
6968 perf_remove_from_context(group_leader);
6969
6970
6971
6972
6973
6974
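		/*
		 * Removing from the context ends up with disabled
		 * event. What we want here is event in the initial
		 * startup state, ready to be added into the new context.
		 */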
6975 perf_event__state_init(group_leader);
6976 list_for_each_entry(sibling, &group_leader->sibling_list,
6977 group_entry) {
6978 perf_remove_from_context(sibling);
6979 perf_event__state_init(sibling);
6980 put_ctx(gctx);
6981 }
6982 mutex_unlock(&gctx->mutex);
6983 put_ctx(gctx);
6984 }
6985
6986 WARN_ON_ONCE(ctx->parent_ctx);
6987 mutex_lock(&ctx->mutex);
6988
6989 if (move_group) {
6990 synchronize_rcu();
6991 perf_install_in_context(ctx, group_leader, event->cpu);
6992 get_ctx(ctx);
6993 list_for_each_entry(sibling, &group_leader->sibling_list,
6994 group_entry) {
6995 perf_install_in_context(ctx, sibling, event->cpu);
6996 get_ctx(ctx);
6997 }
6998 }
6999
7000 perf_install_in_context(ctx, event, event->cpu);
7001 ++ctx->generation;
7002 perf_unpin_context(ctx);
7003 mutex_unlock(&ctx->mutex);
7004
7005 put_online_cpus();
7006
7007 event->owner = current;
7008
7009	mutex_lock(&current->perf_event_mutex);
7010	list_add_tail(&event->owner_entry, &current->perf_event_list);
7011	mutex_unlock(&current->perf_event_mutex);
7012
7013
7014
7015
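	/*
	 * Precalculate sample_data sizes
	 */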
7016 perf_event__header_size(event);
7017 perf_event__id_header_size(event);
7018
7019
7020
7021
7022
7023
7024
7025 fdput(group);
7026 fd_install(event_fd, event_file);
7027 return event_fd;
7028
7029err_context:
7030 perf_unpin_context(ctx);
7031 put_ctx(ctx);
7032err_alloc:
7033 free_event(event);
7034err_task:
7035 put_online_cpus();
7036 if (task)
7037 put_task_struct(task);
7038err_group_fd:
7039 fdput(group);
7040err_fd:
7041 put_unused_fd(event_fd);
7042 return err;
7043}
7044
7045
7046
7047
7048
7049
7050
7051
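/**
 * perf_event_create_kernel_counter
 *
 * @attr: attributes of the counter to create
 * @cpu: cpu in which the counter is bound
 * @task: task to profile (NULL for percpu)
 */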
7052struct perf_event *
7053perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
7054 struct task_struct *task,
7055 perf_overflow_handler_t overflow_handler,
7056 void *context)
7057{
7058 struct perf_event_context *ctx;
7059 struct perf_event *event;
7060 int err;
7061
7062
7063
7064
7065
7066 event = perf_event_alloc(attr, cpu, task, NULL, NULL,
7067 overflow_handler, context);
7068 if (IS_ERR(event)) {
7069 err = PTR_ERR(event);
7070 goto err;
7071 }
7072
7073 ctx = find_get_context(event->pmu, task, cpu);
7074 if (IS_ERR(ctx)) {
7075 err = PTR_ERR(ctx);
7076 goto err_free;
7077 }
7078
7079 WARN_ON_ONCE(ctx->parent_ctx);
7080 mutex_lock(&ctx->mutex);
7081 perf_install_in_context(ctx, event, cpu);
7082 ++ctx->generation;
7083 perf_unpin_context(ctx);
7084 mutex_unlock(&ctx->mutex);
7085
7086 return event;
7087
7088err_free:
7089 free_event(event);
7090err:
7091 return ERR_PTR(err);
7092}
7093EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
7094
7095void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
7096{
7097 struct perf_event_context *src_ctx;
7098 struct perf_event_context *dst_ctx;
7099 struct perf_event *event, *tmp;
7100 LIST_HEAD(events);
7101
7102 src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
7103 dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
7104
7105 mutex_lock(&src_ctx->mutex);
7106 list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
7107 event_entry) {
7108 perf_remove_from_context(event);
7109 put_ctx(src_ctx);
7110 list_add(&event->event_entry, &events);
7111 }
7112 mutex_unlock(&src_ctx->mutex);
7113
7114 synchronize_rcu();
7115
7116 mutex_lock(&dst_ctx->mutex);
7117 list_for_each_entry_safe(event, tmp, &events, event_entry) {
7118 list_del(&event->event_entry);
7119 if (event->state >= PERF_EVENT_STATE_OFF)
7120 event->state = PERF_EVENT_STATE_INACTIVE;
7121 perf_install_in_context(dst_ctx, event, dst_cpu);
7122 get_ctx(dst_ctx);
7123 }
7124 mutex_unlock(&dst_ctx->mutex);
7125}
7126EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
7127
7128static void sync_child_event(struct perf_event *child_event,
7129 struct task_struct *child)
7130{
7131 struct perf_event *parent_event = child_event->parent;
7132 u64 child_val;
7133
7134 if (child_event->attr.inherit_stat)
7135 perf_event_read_event(child_event, child);
7136
7137 child_val = perf_event_count(child_event);
7138
7139
7140
7141
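	/*
	 * Add back the child's count to the parent's count:
	 */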
7142 atomic64_add(child_val, &parent_event->child_count);
7143 atomic64_add(child_event->total_time_enabled,
7144 &parent_event->child_total_time_enabled);
7145 atomic64_add(child_event->total_time_running,
7146 &parent_event->child_total_time_running);
7147
7148
7149
7150
7151 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
7152 mutex_lock(&parent_event->child_mutex);
7153 list_del_init(&child_event->child_list);
7154 mutex_unlock(&parent_event->child_mutex);
7155
7156
7157
7158
7159
7160 put_event(parent_event);
7161}
7162
7163static void
7164__perf_event_exit_task(struct perf_event *child_event,
7165 struct perf_event_context *child_ctx,
7166 struct task_struct *child)
7167{
7168 if (child_event->parent) {
7169 raw_spin_lock_irq(&child_ctx->lock);
7170 perf_group_detach(child_event);
7171 raw_spin_unlock_irq(&child_ctx->lock);
7172 }
7173
7174 perf_remove_from_context(child_event);
7175
7176
7177
7178
7179
7180
7181 if (child_event->parent) {
7182 sync_child_event(child_event, child);
7183 free_event(child_event);
7184 }
7185}
7186
7187static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
7188{
7189 struct perf_event *child_event, *tmp;
7190 struct perf_event_context *child_ctx;
7191 unsigned long flags;
7192
7193 if (likely(!child->perf_event_ctxp[ctxn])) {
7194 perf_event_task(child, NULL, 0);
7195 return;
7196 }
7197
7198 local_irq_save(flags);
7199
7200
7201
7202
7203
7204
7205 child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);
7206
7207
7208
7209
7210
7211
7212 raw_spin_lock(&child_ctx->lock);
7213 task_ctx_sched_out(child_ctx);
7214 child->perf_event_ctxp[ctxn] = NULL;
7215
7216
7217
7218
7219
7220 unclone_ctx(child_ctx);
7221 update_context_time(child_ctx);
7222 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
7223
7224
7225
7226
7227
7228
7229 perf_event_task(child, child_ctx, 0);
7230
7231
7232
7233
7234
7235
7236
7237
7238
7239
7240
7241 mutex_lock(&child_ctx->mutex);
7242
7243again:
7244 list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
7245 group_entry)
7246 __perf_event_exit_task(child_event, child_ctx, child);
7247
7248 list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
7249 group_entry)
7250 __perf_event_exit_task(child_event, child_ctx, child);
7251
7252
7253
7254
7255
7256
7257 if (!list_empty(&child_ctx->pinned_groups) ||
7258 !list_empty(&child_ctx->flexible_groups))
7259 goto again;
7260
7261 mutex_unlock(&child_ctx->mutex);
7262
7263 put_ctx(child_ctx);
7264}
7265
7266
7267
7268
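/*
 * When a child task exits, feed back event values to parent events.
 */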
7269void perf_event_exit_task(struct task_struct *child)
7270{
7271 struct perf_event *event, *tmp;
7272 int ctxn;
7273
7274 mutex_lock(&child->perf_event_mutex);
7275 list_for_each_entry_safe(event, tmp, &child->perf_event_list,
7276 owner_entry) {
7277 list_del_init(&event->owner_entry);
7278
7279
7280
7281
7282
7283
7284 smp_wmb();
7285 event->owner = NULL;
7286 }
7287 mutex_unlock(&child->perf_event_mutex);
7288
7289 for_each_task_context_nr(ctxn)
7290 perf_event_exit_task_context(child, ctxn);
7291}
7292
7293static void perf_free_event(struct perf_event *event,
7294 struct perf_event_context *ctx)
7295{
7296 struct perf_event *parent = event->parent;
7297
7298 if (WARN_ON_ONCE(!parent))
7299 return;
7300
7301 mutex_lock(&parent->child_mutex);
7302 list_del_init(&event->child_list);
7303 mutex_unlock(&parent->child_mutex);
7304
7305 put_event(parent);
7306
7307 perf_group_detach(event);
7308 list_del_event(event, ctx);
7309 free_event(event);
7310}
7311
7312
7313
7314
7315
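/*
 * Free an unexposed, unused context as created by inheritance;
 * used by fork() in case of failure.
 */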
7316void perf_event_free_task(struct task_struct *task)
7317{
7318 struct perf_event_context *ctx;
7319 struct perf_event *event, *tmp;
7320 int ctxn;
7321
7322 for_each_task_context_nr(ctxn) {
7323 ctx = task->perf_event_ctxp[ctxn];
7324 if (!ctx)
7325 continue;
7326
7327 mutex_lock(&ctx->mutex);
7328again:
7329 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
7330 group_entry)
7331 perf_free_event(event, ctx);
7332
7333 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
7334 group_entry)
7335 perf_free_event(event, ctx);
7336
7337 if (!list_empty(&ctx->pinned_groups) ||
7338 !list_empty(&ctx->flexible_groups))
7339 goto again;
7340
7341 mutex_unlock(&ctx->mutex);
7342
7343 put_ctx(ctx);
7344 }
7345}
7346
7347void perf_event_delayed_put(struct task_struct *task)
7348{
7349 int ctxn;
7350
7351 for_each_task_context_nr(ctxn)
7352 WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
7353}
7354
7355
7356
7357
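/*
 * Inherit an event from parent task to child task.
 */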
7358static struct perf_event *
7359inherit_event(struct perf_event *parent_event,
7360 struct task_struct *parent,
7361 struct perf_event_context *parent_ctx,
7362 struct task_struct *child,
7363 struct perf_event *group_leader,
7364 struct perf_event_context *child_ctx)
7365{
7366 struct perf_event *child_event;
7367 unsigned long flags;
7368
7369
7370
7371
7372
7373
7374
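	/*
	 * Instead of creating recursive hierarchies of events,
	 * we link inherited events back to the original parent,
	 * which we use as the reference count:
	 */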
7375 if (parent_event->parent)
7376 parent_event = parent_event->parent;
7377
7378 child_event = perf_event_alloc(&parent_event->attr,
7379 parent_event->cpu,
7380 child,
7381 group_leader, parent_event,
7382 NULL, NULL);
7383 if (IS_ERR(child_event))
7384 return child_event;
7385
7386 if (!atomic_long_inc_not_zero(&parent_event->refcount)) {
7387 free_event(child_event);
7388 return NULL;
7389 }
7390
7391 get_ctx(child_ctx);
7392
7393
7394
7395
7396
7397
7398 if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
7399 child_event->state = PERF_EVENT_STATE_INACTIVE;
7400 else
7401 child_event->state = PERF_EVENT_STATE_OFF;
7402
7403 if (parent_event->attr.freq) {
7404 u64 sample_period = parent_event->hw.sample_period;
7405 struct hw_perf_event *hwc = &child_event->hw;
7406
7407 hwc->sample_period = sample_period;
7408 hwc->last_period = sample_period;
7409
7410 local64_set(&hwc->period_left, sample_period);
7411 }
7412
7413 child_event->ctx = child_ctx;
7414 child_event->overflow_handler = parent_event->overflow_handler;
7415 child_event->overflow_handler_context
7416 = parent_event->overflow_handler_context;
7417
7418
7419
7420
7421 perf_event__header_size(child_event);
7422 perf_event__id_header_size(child_event);
7423
7424
7425
7426
7427 raw_spin_lock_irqsave(&child_ctx->lock, flags);
7428 add_event_to_ctx(child_event, child_ctx);
7429 raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
7430
7431
7432
7433
7434 WARN_ON_ONCE(parent_event->ctx->parent_ctx);
7435 mutex_lock(&parent_event->child_mutex);
7436 list_add_tail(&child_event->child_list, &parent_event->child_list);
7437 mutex_unlock(&parent_event->child_mutex);
7438
7439 return child_event;
7440}
7441
7442static int inherit_group(struct perf_event *parent_event,
7443 struct task_struct *parent,
7444 struct perf_event_context *parent_ctx,
7445 struct task_struct *child,
7446 struct perf_event_context *child_ctx)
7447{
7448 struct perf_event *leader;
7449 struct perf_event *sub;
7450 struct perf_event *child_ctr;
7451
7452 leader = inherit_event(parent_event, parent, parent_ctx,
7453 child, NULL, child_ctx);
7454 if (IS_ERR(leader))
7455 return PTR_ERR(leader);
7456 list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
7457 child_ctr = inherit_event(sub, parent, parent_ctx,
7458 child, leader, child_ctx);
7459 if (IS_ERR(child_ctr))
7460 return PTR_ERR(child_ctr);
7461 }
7462 return 0;
7463}
7464
7465static int
7466inherit_task_group(struct perf_event *event, struct task_struct *parent,
7467 struct perf_event_context *parent_ctx,
7468 struct task_struct *child, int ctxn,
7469 int *inherited_all)
7470{
7471 int ret;
7472 struct perf_event_context *child_ctx;
7473
7474 if (!event->attr.inherit) {
7475 *inherited_all = 0;
7476 return 0;
7477 }
7478
7479 child_ctx = child->perf_event_ctxp[ctxn];
7480 if (!child_ctx) {
7481
7482
7483
7484
7485
7486
7487
7488 child_ctx = alloc_perf_context(parent_ctx->pmu, child);
7489 if (!child_ctx)
7490 return -ENOMEM;
7491
7492 child->perf_event_ctxp[ctxn] = child_ctx;
7493 }
7494
7495 ret = inherit_group(event, parent, parent_ctx,
7496 child, child_ctx);
7497
7498 if (ret)
7499 *inherited_all = 0;
7500
7501 return ret;
7502}
7503
7504
7505
7506
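/*
 * Inherit the events of the parent's context number 'ctxn' into the child.
 */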
7507int perf_event_init_context(struct task_struct *child, int ctxn)
7508{
7509 struct perf_event_context *child_ctx, *parent_ctx;
7510 struct perf_event_context *cloned_ctx;
7511 struct perf_event *event;
7512 struct task_struct *parent = current;
7513 int inherited_all = 1;
7514 unsigned long flags;
7515 int ret = 0;
7516
7517 if (likely(!parent->perf_event_ctxp[ctxn]))
7518 return 0;
7519
7520
7521
7522
7523
7524 parent_ctx = perf_pin_task_context(parent, ctxn);
7525
7526
7527
7528
7529
7530
7531
7532
7533
7534
7535
7536
7537 mutex_lock(&parent_ctx->mutex);
7538
7539
7540
7541
7542
7543 list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
7544 ret = inherit_task_group(event, parent, parent_ctx,
7545 child, ctxn, &inherited_all);
7546 if (ret)
7547 break;
7548 }
7549
7550
7551
7552
7553
7554
7555 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
7556 parent_ctx->rotate_disable = 1;
7557 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
7558
7559 list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
7560 ret = inherit_task_group(event, parent, parent_ctx,
7561 child, ctxn, &inherited_all);
7562 if (ret)
7563 break;
7564 }
7565
7566 raw_spin_lock_irqsave(&parent_ctx->lock, flags);
7567 parent_ctx->rotate_disable = 0;
7568
7569 child_ctx = child->perf_event_ctxp[ctxn];
7570
7571 if (child_ctx && inherited_all) {
7572
7573
7574
7575
7576
7577
7578
7579 cloned_ctx = parent_ctx->parent_ctx;
7580 if (cloned_ctx) {
7581 child_ctx->parent_ctx = cloned_ctx;
7582 child_ctx->parent_gen = parent_ctx->parent_gen;
7583 } else {
7584 child_ctx->parent_ctx = parent_ctx;
7585 child_ctx->parent_gen = parent_ctx->generation;
7586 }
7587 get_ctx(child_ctx->parent_ctx);
7588 }
7589
7590 raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
7591 mutex_unlock(&parent_ctx->mutex);
7592
7593 perf_unpin_context(parent_ctx);
7594 put_ctx(parent_ctx);
7595
7596 return ret;
7597}
7598
7599
7600
7601
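/*
 * Initialize the perf_event contexts in a newly forked task.
 */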
7602int perf_event_init_task(struct task_struct *child)
7603{
7604 int ctxn, ret;
7605
7606 memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
7607 mutex_init(&child->perf_event_mutex);
7608 INIT_LIST_HEAD(&child->perf_event_list);
7609
7610 for_each_task_context_nr(ctxn) {
7611 ret = perf_event_init_context(child, ctxn);
7612 if (ret)
7613 return ret;
7614 }
7615
7616 return 0;
7617}
7618
7619static void __init perf_event_init_all_cpus(void)
7620{
7621 struct swevent_htable *swhash;
7622 int cpu;
7623
7624 for_each_possible_cpu(cpu) {
7625 swhash = &per_cpu(swevent_htable, cpu);
7626 mutex_init(&swhash->hlist_mutex);
7627 INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
7628 }
7629}
7630
7631static void perf_event_init_cpu(int cpu)
7632{
7633 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
7634
7635 mutex_lock(&swhash->hlist_mutex);
7636 if (swhash->hlist_refcount > 0) {
7637 struct swevent_hlist *hlist;
7638
7639 hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
7640 WARN_ON(!hlist);
7641 rcu_assign_pointer(swhash->swevent_hlist, hlist);
7642 }
7643 mutex_unlock(&swhash->hlist_mutex);
7644}
7645
7646#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
7647static void perf_pmu_rotate_stop(struct pmu *pmu)
7648{
7649 struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
7650
7651 WARN_ON(!irqs_disabled());
7652
7653 list_del_init(&cpuctx->rotation_list);
7654}
7655
7656static void __perf_event_exit_context(void *__info)
7657{
7658 struct perf_event_context *ctx = __info;
7659 struct perf_event *event, *tmp;
7660
7661 perf_pmu_rotate_stop(ctx->pmu);
7662
7663 list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
7664 __perf_remove_from_context(event);
7665 list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
7666 __perf_remove_from_context(event);
7667}
7668
7669static void perf_event_exit_cpu_context(int cpu)
7670{
7671 struct perf_event_context *ctx;
7672 struct pmu *pmu;
7673 int idx;
7674
7675 idx = srcu_read_lock(&pmus_srcu);
7676 list_for_each_entry_rcu(pmu, &pmus, entry) {
7677 ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
7678
7679 mutex_lock(&ctx->mutex);
7680 smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
7681 mutex_unlock(&ctx->mutex);
7682 }
7683 srcu_read_unlock(&pmus_srcu, idx);
7684}
7685
7686static void perf_event_exit_cpu(int cpu)
7687{
7688 struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
7689
7690 mutex_lock(&swhash->hlist_mutex);
7691 swevent_hlist_release(swhash);
7692 mutex_unlock(&swhash->hlist_mutex);
7693
7694 perf_event_exit_cpu_context(cpu);
7695}
7696#else
7697static inline void perf_event_exit_cpu(int cpu) { }
7698#endif
7699
7700static int
7701perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
7702{
7703 int cpu;
7704
7705 for_each_online_cpu(cpu)
7706 perf_event_exit_cpu(cpu);
7707
7708 return NOTIFY_OK;
7709}
7710
7711
7712
7713
7714
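/*
 * Run the perf reboot notifier at the very last possible moment so that
 * the generic watchdog code runs as long as possible.
 */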
7715static struct notifier_block perf_reboot_notifier = {
7716 .notifier_call = perf_reboot,
7717 .priority = INT_MIN,
7718};
7719
7720static int
7721perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
7722{
7723 unsigned int cpu = (long)hcpu;
7724
7725 switch (action & ~CPU_TASKS_FROZEN) {
7726
7727 case CPU_UP_PREPARE:
7728 case CPU_DOWN_FAILED:
7729 perf_event_init_cpu(cpu);
7730 break;
7731
7732 case CPU_UP_CANCELED:
7733 case CPU_DOWN_PREPARE:
7734 perf_event_exit_cpu(cpu);
7735 break;
7736 default:
7737 break;
7738 }
7739
7740 return NOTIFY_OK;
7741}
7742
7743void __init perf_event_init(void)
7744{
7745 int ret;
7746
7747 idr_init(&pmu_idr);
7748
7749 perf_event_init_all_cpus();
7750 init_srcu_struct(&pmus_srcu);
7751 perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
7752 perf_pmu_register(&perf_cpu_clock, NULL, -1);
7753 perf_pmu_register(&perf_task_clock, NULL, -1);
7754 perf_tp_register();
7755 perf_cpu_notifier(perf_cpu_notify);
7756 register_reboot_notifier(&perf_reboot_notifier);
7757
7758 ret = init_hw_breakpoint();
7759 WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
7760
7761
7762 jump_label_rate_limit(&perf_sched_events, HZ);
7763
7764
7765
7766
7767
7768 BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
7769 != 1024);
7770}
7771
7772static int __init perf_event_sysfs_init(void)
7773{
7774 struct pmu *pmu;
7775 int ret;
7776
7777 mutex_lock(&pmus_lock);
7778
7779 ret = bus_register(&pmu_bus);
7780 if (ret)
7781 goto unlock;
7782
7783 list_for_each_entry(pmu, &pmus, entry) {
7784 if (!pmu->name || pmu->type < 0)
7785 continue;
7786
7787 ret = pmu_dev_alloc(pmu);
7788 WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
7789 }
7790 pmu_bus_running = 1;
7791 ret = 0;
7792
7793unlock:
7794 mutex_unlock(&pmus_lock);
7795
7796 return ret;
7797}
7798device_initcall(perf_event_sysfs_init);
7799
7800#ifdef CONFIG_CGROUP_PERF
7801static struct cgroup_subsys_state *perf_cgroup_css_alloc(struct cgroup *cont)
7802{
7803 struct perf_cgroup *jc;
7804
7805 jc = kzalloc(sizeof(*jc), GFP_KERNEL);
7806 if (!jc)
7807 return ERR_PTR(-ENOMEM);
7808
7809 jc->info = alloc_percpu(struct perf_cgroup_info);
7810 if (!jc->info) {
7811 kfree(jc);
7812 return ERR_PTR(-ENOMEM);
7813 }
7814
7815 return &jc->css;
7816}
7817
7818static void perf_cgroup_css_free(struct cgroup *cont)
7819{
7820 struct perf_cgroup *jc;
7821 jc = container_of(cgroup_subsys_state(cont, perf_subsys_id),
7822 struct perf_cgroup, css);
7823 free_percpu(jc->info);
7824 kfree(jc);
7825}
7826
7827static int __perf_cgroup_move(void *info)
7828{
7829 struct task_struct *task = info;
7830 perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
7831 return 0;
7832}
7833
7834static void perf_cgroup_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
7835{
7836 struct task_struct *task;
7837
7838 cgroup_taskset_for_each(task, cgrp, tset)
7839 task_function_call(task, __perf_cgroup_move, task);
7840}
7841
7842static void perf_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
7843 struct task_struct *task)
7844{
7845
7846
7847
7848
7849
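	/*
	 * cgroup_exit() is called in the copy_process() failure path.
	 * Ignore this case since the task hasn't run yet, this avoids
	 * trying to poke a half freed task state from generic code.
	 */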
7850 if (!(task->flags & PF_EXITING))
7851 return;
7852
7853 task_function_call(task, __perf_cgroup_move, task);
7854}
7855
7856struct cgroup_subsys perf_subsys = {
7857 .name = "perf_event",
7858 .subsys_id = perf_subsys_id,
7859 .css_alloc = perf_cgroup_css_alloc,
7860 .css_free = perf_cgroup_css_free,
7861 .exit = perf_cgroup_exit,
7862 .attach = perf_cgroup_attach,
7863};
7864#endif
7865