linux/kernel/events/core.c
   1/*
   2 * Performance events core code:
   3 *
   4 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
   5 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
   6 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
   7 *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
   8 *
   9 * For licensing details see kernel-base/COPYING
  10 */
  11
  12#include <linux/fs.h>
  13#include <linux/mm.h>
  14#include <linux/cpu.h>
  15#include <linux/smp.h>
  16#include <linux/idr.h>
  17#include <linux/file.h>
  18#include <linux/poll.h>
  19#include <linux/slab.h>
  20#include <linux/hash.h>
  21#include <linux/tick.h>
  22#include <linux/sysfs.h>
  23#include <linux/dcache.h>
  24#include <linux/percpu.h>
  25#include <linux/ptrace.h>
  26#include <linux/reboot.h>
  27#include <linux/vmstat.h>
  28#include <linux/device.h>
  29#include <linux/export.h>
  30#include <linux/vmalloc.h>
  31#include <linux/hardirq.h>
  32#include <linux/rculist.h>
  33#include <linux/uaccess.h>
  34#include <linux/syscalls.h>
  35#include <linux/anon_inodes.h>
  36#include <linux/kernel_stat.h>
  37#include <linux/perf_event.h>
  38#include <linux/ftrace_event.h>
  39#include <linux/hw_breakpoint.h>
  40#include <linux/mm_types.h>
  41#include <linux/cgroup.h>
  42
  43#include "internal.h"
  44
  45#include <asm/irq_regs.h>
  46
  47struct remote_function_call {
  48        struct task_struct      *p;
  49        int                     (*func)(void *info);
  50        void                    *info;
  51        int                     ret;
  52};
  53
  54static void remote_function(void *data)
  55{
  56        struct remote_function_call *tfc = data;
  57        struct task_struct *p = tfc->p;
  58
  59        if (p) {
  60                tfc->ret = -EAGAIN;
  61                if (task_cpu(p) != smp_processor_id() || !task_curr(p))
  62                        return;
  63        }
  64
  65        tfc->ret = tfc->func(tfc->info);
  66}
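
/*
 * Note: remote_function() runs on the target CPU (it is the callback
 * handed to smp_call_function_single() below).  When @p is set, a
 * result of -EAGAIN means the task migrated or stopped running between
 * the caller sampling task_cpu(p) and the IPI arriving, so the caller
 * may retry.
 */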
  67
  68/**
  69 * task_function_call - call a function on the cpu on which a task runs
  70 * @p:          the task to evaluate
  71 * @func:       the function to be called
  72 * @info:       the function call argument
  73 *
  74 * Calls the function @func when the task is currently running. This might
   75 * be on the current CPU, which just calls the function directly.
  76 *
  77 * returns: @func return value, or
  78 *          -ESRCH  - when the process isn't running
  79 *          -EAGAIN - when the process moved away
  80 */
  81static int
  82task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
  83{
  84        struct remote_function_call data = {
  85                .p      = p,
  86                .func   = func,
  87                .info   = info,
  88                .ret    = -ESRCH, /* No such (running) process */
  89        };
  90
  91        if (task_curr(p))
  92                smp_call_function_single(task_cpu(p), remote_function, &data, 1);
  93
  94        return data.ret;
  95}
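
/*
 * Illustrative usage note (not verbatim from any one caller): code that
 * must run @func on the task's CPU typically re-checks its state under
 * ctx->lock and retries when this returns -EAGAIN or -ESRCH, as the
 * "retry:" loops in perf_event_disable() and perf_install_in_context()
 * below do.
 */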
  96
  97/**
  98 * cpu_function_call - call a function on the cpu
  99 * @func:       the function to be called
 100 * @info:       the function call argument
 101 *
 102 * Calls the function @func on the remote cpu.
 103 *
 104 * returns: @func return value or -ENXIO when the cpu is offline
 105 */
 106static int cpu_function_call(int cpu, int (*func) (void *info), void *info)
 107{
 108        struct remote_function_call data = {
 109                .p      = NULL,
 110                .func   = func,
 111                .info   = info,
 112                .ret    = -ENXIO, /* No such CPU */
 113        };
 114
 115        smp_call_function_single(cpu, remote_function, &data, 1);
 116
 117        return data.ret;
 118}
 119
 120#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
 121                       PERF_FLAG_FD_OUTPUT  |\
 122                       PERF_FLAG_PID_CGROUP)
 123
 124/*
 125 * branch priv levels that need permission checks
 126 */
 127#define PERF_SAMPLE_BRANCH_PERM_PLM \
 128        (PERF_SAMPLE_BRANCH_KERNEL |\
 129         PERF_SAMPLE_BRANCH_HV)
 130
 131enum event_type_t {
 132        EVENT_FLEXIBLE = 0x1,
 133        EVENT_PINNED = 0x2,
 134        EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
 135};
 136
 137/*
 138 * perf_sched_events : >0 events exist
 139 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
 140 */
 141struct static_key_deferred perf_sched_events __read_mostly;
 142static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
 143static DEFINE_PER_CPU(atomic_t, perf_branch_stack_events);
 144
 145static atomic_t nr_mmap_events __read_mostly;
 146static atomic_t nr_comm_events __read_mostly;
 147static atomic_t nr_task_events __read_mostly;
 148
 149static LIST_HEAD(pmus);
 150static DEFINE_MUTEX(pmus_lock);
 151static struct srcu_struct pmus_srcu;
 152
 153/*
 154 * perf event paranoia level:
 155 *  -1 - not paranoid at all
 156 *   0 - disallow raw tracepoint access for unpriv
 157 *   1 - disallow cpu events for unpriv
 158 *   2 - disallow kernel profiling for unpriv
 159 */
 160int sysctl_perf_event_paranoid __read_mostly = 1;
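
/*
 * Example (illustrative): this knob is exposed through sysctl as
 * /proc/sys/kernel/perf_event_paranoid, so
 *    # echo 2 > /proc/sys/kernel/perf_event_paranoid
 * restricts unprivileged users to per-task, user-space-only profiling.
 */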
 161
 162/* Minimum for 512 kiB + 1 user control page */
 163int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
 164
 165/*
 166 * max perf event sample rate
 167 */
 168#define DEFAULT_MAX_SAMPLE_RATE 100000
 169int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
 170static int max_samples_per_tick __read_mostly =
 171        DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
 172
 173int perf_proc_update_handler(struct ctl_table *table, int write,
 174                void __user *buffer, size_t *lenp,
 175                loff_t *ppos)
 176{
 177        int ret = proc_dointvec(table, write, buffer, lenp, ppos);
 178
 179        if (ret || !write)
 180                return ret;
 181
 182        max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
 183
 184        return 0;
 185}
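
/*
 * Example (illustrative): this handler backs the
 * kernel.perf_event_max_sample_rate sysctl.  Assuming HZ=1000, writing
 * 50000 to it re-derives the per-tick budget as
 * DIV_ROUND_UP(50000, 1000) = 50 samples per tick.
 */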
 186
 187static atomic64_t perf_event_id;
 188
 189static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
 190                              enum event_type_t event_type);
 191
 192static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
 193                             enum event_type_t event_type,
 194                             struct task_struct *task);
 195
 196static void update_context_time(struct perf_event_context *ctx);
 197static u64 perf_event_time(struct perf_event *event);
 198
 199void __weak perf_event_print_debug(void)        { }
 200
 201extern __weak const char *perf_pmu_name(void)
 202{
 203        return "pmu";
 204}
 205
 206static inline u64 perf_clock(void)
 207{
 208        return local_clock();
 209}
 210
 211static inline struct perf_cpu_context *
 212__get_cpu_context(struct perf_event_context *ctx)
 213{
 214        return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
 215}
 216
 217static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
 218                          struct perf_event_context *ctx)
 219{
 220        raw_spin_lock(&cpuctx->ctx.lock);
 221        if (ctx)
 222                raw_spin_lock(&ctx->lock);
 223}
 224
 225static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
 226                            struct perf_event_context *ctx)
 227{
 228        if (ctx)
 229                raw_spin_unlock(&ctx->lock);
 230        raw_spin_unlock(&cpuctx->ctx.lock);
 231}
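
/*
 * Lock ordering note: perf_ctx_lock() always takes the per-CPU context
 * lock before the (optional) task context lock, and perf_ctx_unlock()
 * releases them in the opposite order.  Code such as
 * __perf_install_in_context() below relies on this nesting when it
 * swaps the locked task context.
 */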
 232
 233#ifdef CONFIG_CGROUP_PERF
 234
 235/*
 236 * perf_cgroup_info keeps track of time_enabled for a cgroup.
 237 * This is a per-cpu dynamically allocated data structure.
 238 */
 239struct perf_cgroup_info {
 240        u64                             time;
 241        u64                             timestamp;
 242};
 243
 244struct perf_cgroup {
 245        struct cgroup_subsys_state      css;
 246        struct perf_cgroup_info __percpu *info;
 247};
 248
 249/*
 250 * Must ensure cgroup is pinned (css_get) before calling
 251 * this function. In other words, we cannot call this function
 252 * if there is no cgroup event for the current CPU context.
 253 */
 254static inline struct perf_cgroup *
 255perf_cgroup_from_task(struct task_struct *task)
 256{
 257        return container_of(task_subsys_state(task, perf_subsys_id),
 258                        struct perf_cgroup, css);
 259}
 260
 261static inline bool
 262perf_cgroup_match(struct perf_event *event)
 263{
 264        struct perf_event_context *ctx = event->ctx;
 265        struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
 266
 267        /* @event doesn't care about cgroup */
 268        if (!event->cgrp)
 269                return true;
 270
 271        /* wants specific cgroup scope but @cpuctx isn't associated with any */
 272        if (!cpuctx->cgrp)
 273                return false;
 274
 275        /*
 276         * Cgroup scoping is recursive.  An event enabled for a cgroup is
 277         * also enabled for all its descendant cgroups.  If @cpuctx's
 278         * cgroup is a descendant of @event's (the test covers identity
 279         * case), it's a match.
 280         */
 281        return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
 282                                    event->cgrp->css.cgroup);
 283}
 284
 285static inline bool perf_tryget_cgroup(struct perf_event *event)
 286{
 287        return css_tryget(&event->cgrp->css);
 288}
 289
 290static inline void perf_put_cgroup(struct perf_event *event)
 291{
 292        css_put(&event->cgrp->css);
 293}
 294
 295static inline void perf_detach_cgroup(struct perf_event *event)
 296{
 297        perf_put_cgroup(event);
 298        event->cgrp = NULL;
 299}
 300
 301static inline int is_cgroup_event(struct perf_event *event)
 302{
 303        return event->cgrp != NULL;
 304}
 305
 306static inline u64 perf_cgroup_event_time(struct perf_event *event)
 307{
 308        struct perf_cgroup_info *t;
 309
 310        t = per_cpu_ptr(event->cgrp->info, event->cpu);
 311        return t->time;
 312}
 313
 314static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
 315{
 316        struct perf_cgroup_info *info;
 317        u64 now;
 318
 319        now = perf_clock();
 320
 321        info = this_cpu_ptr(cgrp->info);
 322
 323        info->time += now - info->timestamp;
 324        info->timestamp = now;
 325}
 326
 327static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
 328{
 329        struct perf_cgroup *cgrp_out = cpuctx->cgrp;
 330        if (cgrp_out)
 331                __update_cgrp_time(cgrp_out);
 332}
 333
 334static inline void update_cgrp_time_from_event(struct perf_event *event)
 335{
 336        struct perf_cgroup *cgrp;
 337
 338        /*
 339         * ensure we access cgroup data only when needed and
 340         * when we know the cgroup is pinned (css_get)
 341         */
 342        if (!is_cgroup_event(event))
 343                return;
 344
 345        cgrp = perf_cgroup_from_task(current);
 346        /*
 347         * Do not update time when cgroup is not active
 348         */
 349        if (cgrp == event->cgrp)
 350                __update_cgrp_time(event->cgrp);
 351}
 352
 353static inline void
 354perf_cgroup_set_timestamp(struct task_struct *task,
 355                          struct perf_event_context *ctx)
 356{
 357        struct perf_cgroup *cgrp;
 358        struct perf_cgroup_info *info;
 359
 360        /*
 361         * ctx->lock held by caller
 362         * ensure we do not access cgroup data
 363         * unless we have the cgroup pinned (css_get)
 364         */
 365        if (!task || !ctx->nr_cgroups)
 366                return;
 367
 368        cgrp = perf_cgroup_from_task(task);
 369        info = this_cpu_ptr(cgrp->info);
 370        info->timestamp = ctx->timestamp;
 371}
 372
 373#define PERF_CGROUP_SWOUT       0x1 /* cgroup switch out every event */
 374#define PERF_CGROUP_SWIN        0x2 /* cgroup switch in events based on task */
 375
 376/*
 377 * reschedule events based on the cgroup constraint of task.
 378 *
 379 * mode SWOUT : schedule out everything
 380 * mode SWIN : schedule in based on cgroup for next
 381 */
 382void perf_cgroup_switch(struct task_struct *task, int mode)
 383{
 384        struct perf_cpu_context *cpuctx;
 385        struct pmu *pmu;
 386        unsigned long flags;
 387
 388        /*
  389         * disable interrupts to avoid getting nr_cgroup
 390         * changes via __perf_event_disable(). Also
 391         * avoids preemption.
 392         */
 393        local_irq_save(flags);
 394
 395        /*
 396         * we reschedule only in the presence of cgroup
 397         * constrained events.
 398         */
 399        rcu_read_lock();
 400
 401        list_for_each_entry_rcu(pmu, &pmus, entry) {
 402                cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
 403                if (cpuctx->unique_pmu != pmu)
 404                        continue; /* ensure we process each cpuctx once */
 405
 406                /*
 407                 * perf_cgroup_events says at least one
 408                 * context on this CPU has cgroup events.
 409                 *
 410                 * ctx->nr_cgroups reports the number of cgroup
 411                 * events for a context.
 412                 */
 413                if (cpuctx->ctx.nr_cgroups > 0) {
 414                        perf_ctx_lock(cpuctx, cpuctx->task_ctx);
 415                        perf_pmu_disable(cpuctx->ctx.pmu);
 416
 417                        if (mode & PERF_CGROUP_SWOUT) {
 418                                cpu_ctx_sched_out(cpuctx, EVENT_ALL);
 419                                /*
 420                                 * must not be done before ctxswout due
 421                                 * to event_filter_match() in event_sched_out()
 422                                 */
 423                                cpuctx->cgrp = NULL;
 424                        }
 425
 426                        if (mode & PERF_CGROUP_SWIN) {
 427                                WARN_ON_ONCE(cpuctx->cgrp);
 428                                /*
 429                                 * set cgrp before ctxsw in to allow
 430                                 * event_filter_match() to not have to pass
 431                                 * task around
 432                                 */
 433                                cpuctx->cgrp = perf_cgroup_from_task(task);
 434                                cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
 435                        }
 436                        perf_pmu_enable(cpuctx->ctx.pmu);
 437                        perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
 438                }
 439        }
 440
 441        rcu_read_unlock();
 442
 443        local_irq_restore(flags);
 444}
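
/*
 * Note on the loop above: several pmus may share one pmu_cpu_context
 * (see perf_pmu_register()/find_pmu_context() later in this file), so
 * the cpuctx->unique_pmu check makes sure each CPU context is switched
 * exactly once per invocation.
 */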
 445
 446static inline void perf_cgroup_sched_out(struct task_struct *task,
 447                                         struct task_struct *next)
 448{
 449        struct perf_cgroup *cgrp1;
 450        struct perf_cgroup *cgrp2 = NULL;
 451
 452        /*
 453         * we come here when we know perf_cgroup_events > 0
 454         */
 455        cgrp1 = perf_cgroup_from_task(task);
 456
 457        /*
 458         * next is NULL when called from perf_event_enable_on_exec()
  459         * which will unconditionally cause a cgroup_switch()
 460         */
 461        if (next)
 462                cgrp2 = perf_cgroup_from_task(next);
 463
 464        /*
 465         * only schedule out current cgroup events if we know
 466         * that we are switching to a different cgroup. Otherwise,
  467         * do not touch the cgroup events.
 468         */
 469        if (cgrp1 != cgrp2)
 470                perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
 471}
 472
 473static inline void perf_cgroup_sched_in(struct task_struct *prev,
 474                                        struct task_struct *task)
 475{
 476        struct perf_cgroup *cgrp1;
 477        struct perf_cgroup *cgrp2 = NULL;
 478
 479        /*
 480         * we come here when we know perf_cgroup_events > 0
 481         */
 482        cgrp1 = perf_cgroup_from_task(task);
 483
 484        /* prev can never be NULL */
 485        cgrp2 = perf_cgroup_from_task(prev);
 486
 487        /*
  488         * only need to schedule in cgroup events if we are changing
  489         * the cgroup during this ctxsw. Cgroup events were not
  490         * scheduled out during the previous ctxsw if that was not the case.
 491         */
 492        if (cgrp1 != cgrp2)
 493                perf_cgroup_switch(task, PERF_CGROUP_SWIN);
 494}
 495
 496static inline int perf_cgroup_connect(int fd, struct perf_event *event,
 497                                      struct perf_event_attr *attr,
 498                                      struct perf_event *group_leader)
 499{
 500        struct perf_cgroup *cgrp;
 501        struct cgroup_subsys_state *css;
 502        struct fd f = fdget(fd);
 503        int ret = 0;
 504
 505        if (!f.file)
 506                return -EBADF;
 507
 508        css = cgroup_css_from_dir(f.file, perf_subsys_id);
 509        if (IS_ERR(css)) {
 510                ret = PTR_ERR(css);
 511                goto out;
 512        }
 513
 514        cgrp = container_of(css, struct perf_cgroup, css);
 515        event->cgrp = cgrp;
 516
 517        /* must be done before we fput() the file */
 518        if (!perf_tryget_cgroup(event)) {
 519                event->cgrp = NULL;
 520                ret = -ENOENT;
 521                goto out;
 522        }
 523
 524        /*
 525         * all events in a group must monitor
 526         * the same cgroup because a task belongs
 527         * to only one perf cgroup at a time
 528         */
 529        if (group_leader && group_leader->cgrp != cgrp) {
 530                perf_detach_cgroup(event);
 531                ret = -EINVAL;
 532        }
 533out:
 534        fdput(f);
 535        return ret;
 536}
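
/*
 * Note: the css reference taken via perf_tryget_cgroup() above is held
 * for the lifetime of the event; it is dropped by perf_detach_cgroup(),
 * either on the group-leader mismatch error path here or when the event
 * is eventually freed.
 */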
 537
 538static inline void
 539perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
 540{
 541        struct perf_cgroup_info *t;
 542        t = per_cpu_ptr(event->cgrp->info, event->cpu);
 543        event->shadow_ctx_time = now - t->timestamp;
 544}
 545
 546static inline void
 547perf_cgroup_defer_enabled(struct perf_event *event)
 548{
 549        /*
 550         * when the current task's perf cgroup does not match
 551         * the event's, we need to remember to call the
  552         * perf_cgroup_mark_enabled() function the first time a task with
 553         * a matching perf cgroup is scheduled in.
 554         */
 555        if (is_cgroup_event(event) && !perf_cgroup_match(event))
 556                event->cgrp_defer_enabled = 1;
 557}
 558
 559static inline void
 560perf_cgroup_mark_enabled(struct perf_event *event,
 561                         struct perf_event_context *ctx)
 562{
 563        struct perf_event *sub;
 564        u64 tstamp = perf_event_time(event);
 565
 566        if (!event->cgrp_defer_enabled)
 567                return;
 568
 569        event->cgrp_defer_enabled = 0;
 570
 571        event->tstamp_enabled = tstamp - event->total_time_enabled;
 572        list_for_each_entry(sub, &event->sibling_list, group_entry) {
 573                if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
 574                        sub->tstamp_enabled = tstamp - sub->total_time_enabled;
 575                        sub->cgrp_defer_enabled = 0;
 576                }
 577        }
 578}
 579#else /* !CONFIG_CGROUP_PERF */
 580
 581static inline bool
 582perf_cgroup_match(struct perf_event *event)
 583{
 584        return true;
 585}
 586
 587static inline void perf_detach_cgroup(struct perf_event *event)
 588{}
 589
 590static inline int is_cgroup_event(struct perf_event *event)
 591{
 592        return 0;
 593}
 594
 595static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
 596{
 597        return 0;
 598}
 599
 600static inline void update_cgrp_time_from_event(struct perf_event *event)
 601{
 602}
 603
 604static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
 605{
 606}
 607
 608static inline void perf_cgroup_sched_out(struct task_struct *task,
 609                                         struct task_struct *next)
 610{
 611}
 612
 613static inline void perf_cgroup_sched_in(struct task_struct *prev,
 614                                        struct task_struct *task)
 615{
 616}
 617
 618static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
 619                                      struct perf_event_attr *attr,
 620                                      struct perf_event *group_leader)
 621{
 622        return -EINVAL;
 623}
 624
 625static inline void
 626perf_cgroup_set_timestamp(struct task_struct *task,
 627                          struct perf_event_context *ctx)
 628{
 629}
 630
 631void
 632perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
 633{
 634}
 635
 636static inline void
 637perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
 638{
 639}
 640
 641static inline u64 perf_cgroup_event_time(struct perf_event *event)
 642{
 643        return 0;
 644}
 645
 646static inline void
 647perf_cgroup_defer_enabled(struct perf_event *event)
 648{
 649}
 650
 651static inline void
 652perf_cgroup_mark_enabled(struct perf_event *event,
 653                         struct perf_event_context *ctx)
 654{
 655}
 656#endif
 657
 658void perf_pmu_disable(struct pmu *pmu)
 659{
 660        int *count = this_cpu_ptr(pmu->pmu_disable_count);
 661        if (!(*count)++)
 662                pmu->pmu_disable(pmu);
 663}
 664
 665void perf_pmu_enable(struct pmu *pmu)
 666{
 667        int *count = this_cpu_ptr(pmu->pmu_disable_count);
 668        if (!--(*count))
 669                pmu->pmu_enable(pmu);
 670}
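
/*
 * perf_pmu_disable()/perf_pmu_enable() nest: the per-CPU
 * pmu_disable_count means only the outermost disable and the matching
 * outermost enable actually reach the pmu driver callbacks.
 */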
 671
 672static DEFINE_PER_CPU(struct list_head, rotation_list);
 673
 674/*
 675 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
 676 * because they're strictly cpu affine and rotate_start is called with IRQs
 677 * disabled, while rotate_context is called from IRQ context.
 678 */
 679static void perf_pmu_rotate_start(struct pmu *pmu)
 680{
 681        struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
 682        struct list_head *head = &__get_cpu_var(rotation_list);
 683
 684        WARN_ON(!irqs_disabled());
 685
 686        if (list_empty(&cpuctx->rotation_list)) {
 687                int was_empty = list_empty(head);
 688                list_add(&cpuctx->rotation_list, head);
 689                if (was_empty)
 690                        tick_nohz_full_kick();
 691        }
 692}
 693
 694static void get_ctx(struct perf_event_context *ctx)
 695{
 696        WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
 697}
 698
 699static void put_ctx(struct perf_event_context *ctx)
 700{
 701        if (atomic_dec_and_test(&ctx->refcount)) {
 702                if (ctx->parent_ctx)
 703                        put_ctx(ctx->parent_ctx);
 704                if (ctx->task)
 705                        put_task_struct(ctx->task);
 706                kfree_rcu(ctx, rcu_head);
 707        }
 708}
 709
 710static void unclone_ctx(struct perf_event_context *ctx)
 711{
 712        if (ctx->parent_ctx) {
 713                put_ctx(ctx->parent_ctx);
 714                ctx->parent_ctx = NULL;
 715        }
 716}
 717
 718static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
 719{
 720        /*
 721         * only top level events have the pid namespace they were created in
 722         */
 723        if (event->parent)
 724                event = event->parent;
 725
 726        return task_tgid_nr_ns(p, event->ns);
 727}
 728
 729static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
 730{
 731        /*
 732         * only top level events have the pid namespace they were created in
 733         */
 734        if (event->parent)
 735                event = event->parent;
 736
 737        return task_pid_nr_ns(p, event->ns);
 738}
 739
 740/*
 741 * If we inherit events we want to return the parent event id
 742 * to userspace.
 743 */
 744static u64 primary_event_id(struct perf_event *event)
 745{
 746        u64 id = event->id;
 747
 748        if (event->parent)
 749                id = event->parent->id;
 750
 751        return id;
 752}
 753
 754/*
 755 * Get the perf_event_context for a task and lock it.
  756 * This has to cope with the fact that until it is locked,
 757 * the context could get moved to another task.
 758 */
 759static struct perf_event_context *
 760perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
 761{
 762        struct perf_event_context *ctx;
 763
 764        rcu_read_lock();
 765retry:
 766        ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
 767        if (ctx) {
 768                /*
 769                 * If this context is a clone of another, it might
 770                 * get swapped for another underneath us by
 771                 * perf_event_task_sched_out, though the
 772                 * rcu_read_lock() protects us from any context
 773                 * getting freed.  Lock the context and check if it
 774                 * got swapped before we could get the lock, and retry
 775                 * if so.  If we locked the right context, then it
 776                 * can't get swapped on us any more.
 777                 */
 778                raw_spin_lock_irqsave(&ctx->lock, *flags);
 779                if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
 780                        raw_spin_unlock_irqrestore(&ctx->lock, *flags);
 781                        goto retry;
 782                }
 783
 784                if (!atomic_inc_not_zero(&ctx->refcount)) {
 785                        raw_spin_unlock_irqrestore(&ctx->lock, *flags);
 786                        ctx = NULL;
 787                }
 788        }
 789        rcu_read_unlock();
 790        return ctx;
 791}
 792
 793/*
 794 * Get the context for a task and increment its pin_count so it
 795 * can't get swapped to another task.  This also increments its
 796 * reference count so that the context can't get freed.
 797 */
 798static struct perf_event_context *
 799perf_pin_task_context(struct task_struct *task, int ctxn)
 800{
 801        struct perf_event_context *ctx;
 802        unsigned long flags;
 803
 804        ctx = perf_lock_task_context(task, ctxn, &flags);
 805        if (ctx) {
 806                ++ctx->pin_count;
 807                raw_spin_unlock_irqrestore(&ctx->lock, flags);
 808        }
 809        return ctx;
 810}
 811
 812static void perf_unpin_context(struct perf_event_context *ctx)
 813{
 814        unsigned long flags;
 815
 816        raw_spin_lock_irqsave(&ctx->lock, flags);
 817        --ctx->pin_count;
 818        raw_spin_unlock_irqrestore(&ctx->lock, flags);
 819}
 820
 821/*
 822 * Update the record of the current time in a context.
 823 */
 824static void update_context_time(struct perf_event_context *ctx)
 825{
 826        u64 now = perf_clock();
 827
 828        ctx->time += now - ctx->timestamp;
 829        ctx->timestamp = now;
 830}
 831
 832static u64 perf_event_time(struct perf_event *event)
 833{
 834        struct perf_event_context *ctx = event->ctx;
 835
 836        if (is_cgroup_event(event))
 837                return perf_cgroup_event_time(event);
 838
 839        return ctx ? ctx->time : 0;
 840}
 841
 842/*
  843 * Update the total_time_enabled and total_time_running fields for an event.
 844 * The caller of this function needs to hold the ctx->lock.
 845 */
 846static void update_event_times(struct perf_event *event)
 847{
 848        struct perf_event_context *ctx = event->ctx;
 849        u64 run_end;
 850
 851        if (event->state < PERF_EVENT_STATE_INACTIVE ||
 852            event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
 853                return;
 854        /*
 855         * in cgroup mode, time_enabled represents
 856         * the time the event was enabled AND active
 857         * tasks were in the monitored cgroup. This is
 858         * independent of the activity of the context as
 859         * there may be a mix of cgroup and non-cgroup events.
 860         *
 861         * That is why we treat cgroup events differently
 862         * here.
 863         */
 864        if (is_cgroup_event(event))
 865                run_end = perf_cgroup_event_time(event);
 866        else if (ctx->is_active)
 867                run_end = ctx->time;
 868        else
 869                run_end = event->tstamp_stopped;
 870
 871        event->total_time_enabled = run_end - event->tstamp_enabled;
 872
 873        if (event->state == PERF_EVENT_STATE_INACTIVE)
 874                run_end = event->tstamp_stopped;
 875        else
 876                run_end = perf_event_time(event);
 877
 878        event->total_time_running = run_end - event->tstamp_running;
 879
 880}
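
/*
 * Worked example for the above: a non-cgroup event that is ACTIVE in an
 * active context with ctx->time == 30, tstamp_enabled == 10 and
 * tstamp_running == 12 reports total_time_enabled == 20 and
 * total_time_running == 18.
 */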
 881
 882/*
 883 * Update total_time_enabled and total_time_running for all events in a group.
 884 */
 885static void update_group_times(struct perf_event *leader)
 886{
 887        struct perf_event *event;
 888
 889        update_event_times(leader);
 890        list_for_each_entry(event, &leader->sibling_list, group_entry)
 891                update_event_times(event);
 892}
 893
 894static struct list_head *
 895ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
 896{
 897        if (event->attr.pinned)
 898                return &ctx->pinned_groups;
 899        else
 900                return &ctx->flexible_groups;
 901}
 902
 903/*
  904 * Add an event to the lists for its context.
 905 * Must be called with ctx->mutex and ctx->lock held.
 906 */
 907static void
 908list_add_event(struct perf_event *event, struct perf_event_context *ctx)
 909{
 910        WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
 911        event->attach_state |= PERF_ATTACH_CONTEXT;
 912
 913        /*
  914         * If we're a standalone event or group leader, we go on the context
  915         * list; group events are kept attached to the group so that
 916         * perf_group_detach can, at all times, locate all siblings.
 917         */
 918        if (event->group_leader == event) {
 919                struct list_head *list;
 920
 921                if (is_software_event(event))
 922                        event->group_flags |= PERF_GROUP_SOFTWARE;
 923
 924                list = ctx_group_list(event, ctx);
 925                list_add_tail(&event->group_entry, list);
 926        }
 927
 928        if (is_cgroup_event(event))
 929                ctx->nr_cgroups++;
 930
 931        if (has_branch_stack(event))
 932                ctx->nr_branch_stack++;
 933
 934        list_add_rcu(&event->event_entry, &ctx->event_list);
 935        if (!ctx->nr_events)
 936                perf_pmu_rotate_start(ctx->pmu);
 937        ctx->nr_events++;
 938        if (event->attr.inherit_stat)
 939                ctx->nr_stat++;
 940}
 941
 942/*
 943 * Initialize event state based on the perf_event_attr::disabled.
 944 */
 945static inline void perf_event__state_init(struct perf_event *event)
 946{
 947        event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
 948                                              PERF_EVENT_STATE_INACTIVE;
 949}
 950
 951/*
 952 * Called at perf_event creation and when events are attached/detached from a
 953 * group.
 954 */
 955static void perf_event__read_size(struct perf_event *event)
 956{
 957        int entry = sizeof(u64); /* value */
 958        int size = 0;
 959        int nr = 1;
 960
 961        if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
 962                size += sizeof(u64);
 963
 964        if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
 965                size += sizeof(u64);
 966
 967        if (event->attr.read_format & PERF_FORMAT_ID)
 968                entry += sizeof(u64);
 969
 970        if (event->attr.read_format & PERF_FORMAT_GROUP) {
 971                nr += event->group_leader->nr_siblings;
 972                size += sizeof(u64);
 973        }
 974
 975        size += entry * nr;
 976        event->read_size = size;
 977}
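
/*
 * Worked example: with read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID
 * and a leader that has two siblings, entry = 16, nr = 3 and the extra
 * u64 for the group's "nr" field gives read_size = 8 + 16 * 3 = 56
 * bytes.
 */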
 978
 979static void perf_event__header_size(struct perf_event *event)
 980{
 981        struct perf_sample_data *data;
 982        u64 sample_type = event->attr.sample_type;
 983        u16 size = 0;
 984
 985        perf_event__read_size(event);
 986
 987        if (sample_type & PERF_SAMPLE_IP)
 988                size += sizeof(data->ip);
 989
 990        if (sample_type & PERF_SAMPLE_ADDR)
 991                size += sizeof(data->addr);
 992
 993        if (sample_type & PERF_SAMPLE_PERIOD)
 994                size += sizeof(data->period);
 995
 996        if (sample_type & PERF_SAMPLE_WEIGHT)
 997                size += sizeof(data->weight);
 998
 999        if (sample_type & PERF_SAMPLE_READ)
1000                size += event->read_size;
1001
1002        if (sample_type & PERF_SAMPLE_DATA_SRC)
1003                size += sizeof(data->data_src.val);
1004
1005        event->header_size = size;
1006}
1007
1008static void perf_event__id_header_size(struct perf_event *event)
1009{
1010        struct perf_sample_data *data;
1011        u64 sample_type = event->attr.sample_type;
1012        u16 size = 0;
1013
1014        if (sample_type & PERF_SAMPLE_TID)
1015                size += sizeof(data->tid_entry);
1016
1017        if (sample_type & PERF_SAMPLE_TIME)
1018                size += sizeof(data->time);
1019
1020        if (sample_type & PERF_SAMPLE_ID)
1021                size += sizeof(data->id);
1022
1023        if (sample_type & PERF_SAMPLE_STREAM_ID)
1024                size += sizeof(data->stream_id);
1025
1026        if (sample_type & PERF_SAMPLE_CPU)
1027                size += sizeof(data->cpu_entry);
1028
1029        event->id_header_size = size;
1030}
1031
1032static void perf_group_attach(struct perf_event *event)
1033{
1034        struct perf_event *group_leader = event->group_leader, *pos;
1035
1036        /*
1037         * We can have double attach due to group movement in perf_event_open.
1038         */
1039        if (event->attach_state & PERF_ATTACH_GROUP)
1040                return;
1041
1042        event->attach_state |= PERF_ATTACH_GROUP;
1043
1044        if (group_leader == event)
1045                return;
1046
1047        if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
1048                        !is_software_event(event))
1049                group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;
1050
1051        list_add_tail(&event->group_entry, &group_leader->sibling_list);
1052        group_leader->nr_siblings++;
1053
1054        perf_event__header_size(group_leader);
1055
1056        list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
1057                perf_event__header_size(pos);
1058}
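
/*
 * Note: PERF_GROUP_SOFTWARE only survives while every member of the
 * group is a software event; group_can_go_on() below uses it to let
 * pure-software groups bypass the hardware scheduling constraints.
 */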
1059
1060/*
 1061 * Remove an event from the lists for its context.
1062 * Must be called with ctx->mutex and ctx->lock held.
1063 */
1064static void
1065list_del_event(struct perf_event *event, struct perf_event_context *ctx)
1066{
1067        struct perf_cpu_context *cpuctx;
1068        /*
1069         * We can have double detach due to exit/hot-unplug + close.
1070         */
1071        if (!(event->attach_state & PERF_ATTACH_CONTEXT))
1072                return;
1073
1074        event->attach_state &= ~PERF_ATTACH_CONTEXT;
1075
1076        if (is_cgroup_event(event)) {
1077                ctx->nr_cgroups--;
1078                cpuctx = __get_cpu_context(ctx);
1079                /*
1080                 * if there are no more cgroup events
 1081                 * then clear cgrp to avoid stale pointer
1082                 * in update_cgrp_time_from_cpuctx()
1083                 */
1084                if (!ctx->nr_cgroups)
1085                        cpuctx->cgrp = NULL;
1086        }
1087
1088        if (has_branch_stack(event))
1089                ctx->nr_branch_stack--;
1090
1091        ctx->nr_events--;
1092        if (event->attr.inherit_stat)
1093                ctx->nr_stat--;
1094
1095        list_del_rcu(&event->event_entry);
1096
1097        if (event->group_leader == event)
1098                list_del_init(&event->group_entry);
1099
1100        update_group_times(event);
1101
1102        /*
 1103         * If the event was in an error state, then keep it
1104         * that way, otherwise bogus counts will be
1105         * returned on read(). The only way to get out
1106         * of error state is by explicit re-enabling
1107         * of the event
1108         */
1109        if (event->state > PERF_EVENT_STATE_OFF)
1110                event->state = PERF_EVENT_STATE_OFF;
1111}
1112
1113static void perf_group_detach(struct perf_event *event)
1114{
1115        struct perf_event *sibling, *tmp;
1116        struct list_head *list = NULL;
1117
1118        /*
1119         * We can have double detach due to exit/hot-unplug + close.
1120         */
1121        if (!(event->attach_state & PERF_ATTACH_GROUP))
1122                return;
1123
1124        event->attach_state &= ~PERF_ATTACH_GROUP;
1125
1126        /*
1127         * If this is a sibling, remove it from its group.
1128         */
1129        if (event->group_leader != event) {
1130                list_del_init(&event->group_entry);
1131                event->group_leader->nr_siblings--;
1132                goto out;
1133        }
1134
1135        if (!list_empty(&event->group_entry))
1136                list = &event->group_entry;
1137
1138        /*
1139         * If this was a group event with sibling events then
1140         * upgrade the siblings to singleton events by adding them
1141         * to whatever list we are on.
1142         */
1143        list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
1144                if (list)
1145                        list_move_tail(&sibling->group_entry, list);
1146                sibling->group_leader = sibling;
1147
1148                /* Inherit group flags from the previous leader */
1149                sibling->group_flags = event->group_flags;
1150        }
1151
1152out:
1153        perf_event__header_size(event->group_leader);
1154
1155        list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
1156                perf_event__header_size(tmp);
1157}
1158
1159static inline int
1160event_filter_match(struct perf_event *event)
1161{
1162        return (event->cpu == -1 || event->cpu == smp_processor_id())
1163            && perf_cgroup_match(event);
1164}
1165
1166static void
1167event_sched_out(struct perf_event *event,
1168                  struct perf_cpu_context *cpuctx,
1169                  struct perf_event_context *ctx)
1170{
1171        u64 tstamp = perf_event_time(event);
1172        u64 delta;
1173        /*
1174         * An event which could not be activated because of
1175         * filter mismatch still needs to have its timings
 1176         * maintained, otherwise bogus information is returned
1177         * via read() for time_enabled, time_running:
1178         */
1179        if (event->state == PERF_EVENT_STATE_INACTIVE
1180            && !event_filter_match(event)) {
1181                delta = tstamp - event->tstamp_stopped;
1182                event->tstamp_running += delta;
1183                event->tstamp_stopped = tstamp;
1184        }
1185
1186        if (event->state != PERF_EVENT_STATE_ACTIVE)
1187                return;
1188
1189        event->state = PERF_EVENT_STATE_INACTIVE;
1190        if (event->pending_disable) {
1191                event->pending_disable = 0;
1192                event->state = PERF_EVENT_STATE_OFF;
1193        }
1194        event->tstamp_stopped = tstamp;
1195        event->pmu->del(event, 0);
1196        event->oncpu = -1;
1197
1198        if (!is_software_event(event))
1199                cpuctx->active_oncpu--;
1200        ctx->nr_active--;
1201        if (event->attr.freq && event->attr.sample_freq)
1202                ctx->nr_freq--;
1203        if (event->attr.exclusive || !cpuctx->active_oncpu)
1204                cpuctx->exclusive = 0;
1205}
1206
1207static void
1208group_sched_out(struct perf_event *group_event,
1209                struct perf_cpu_context *cpuctx,
1210                struct perf_event_context *ctx)
1211{
1212        struct perf_event *event;
1213        int state = group_event->state;
1214
1215        event_sched_out(group_event, cpuctx, ctx);
1216
1217        /*
1218         * Schedule out siblings (if any):
1219         */
1220        list_for_each_entry(event, &group_event->sibling_list, group_entry)
1221                event_sched_out(event, cpuctx, ctx);
1222
1223        if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
1224                cpuctx->exclusive = 0;
1225}
1226
1227/*
1228 * Cross CPU call to remove a performance event
1229 *
1230 * We disable the event on the hardware level first. After that we
1231 * remove it from the context list.
1232 */
1233static int __perf_remove_from_context(void *info)
1234{
1235        struct perf_event *event = info;
1236        struct perf_event_context *ctx = event->ctx;
1237        struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1238
1239        raw_spin_lock(&ctx->lock);
1240        event_sched_out(event, cpuctx, ctx);
1241        list_del_event(event, ctx);
1242        if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
1243                ctx->is_active = 0;
1244                cpuctx->task_ctx = NULL;
1245        }
1246        raw_spin_unlock(&ctx->lock);
1247
1248        return 0;
1249}
1250
1251
1252/*
1253 * Remove the event from a task's (or a CPU's) list of events.
1254 *
1255 * CPU events are removed with a smp call. For task events we only
1256 * call when the task is on a CPU.
1257 *
1258 * If event->ctx is a cloned context, callers must make sure that
1259 * every task struct that event->ctx->task could possibly point to
1260 * remains valid.  This is OK when called from perf_release since
1261 * that only calls us on the top-level context, which can't be a clone.
1262 * When called from perf_event_exit_task, it's OK because the
1263 * context has been detached from its task.
1264 */
1265static void perf_remove_from_context(struct perf_event *event)
1266{
1267        struct perf_event_context *ctx = event->ctx;
1268        struct task_struct *task = ctx->task;
1269
1270        lockdep_assert_held(&ctx->mutex);
1271
1272        if (!task) {
1273                /*
1274                 * Per cpu events are removed via an smp call and
1275                 * the removal is always successful.
1276                 */
1277                cpu_function_call(event->cpu, __perf_remove_from_context, event);
1278                return;
1279        }
1280
1281retry:
1282        if (!task_function_call(task, __perf_remove_from_context, event))
1283                return;
1284
1285        raw_spin_lock_irq(&ctx->lock);
1286        /*
1287         * If we failed to find a running task, but find the context active now
1288         * that we've acquired the ctx->lock, retry.
1289         */
1290        if (ctx->is_active) {
1291                raw_spin_unlock_irq(&ctx->lock);
1292                goto retry;
1293        }
1294
1295        /*
 1296         * Since the task isn't running, it's safe to remove the event; our
 1297         * holding the ctx->lock ensures the task won't get scheduled in.
1298         */
1299        list_del_event(event, ctx);
1300        raw_spin_unlock_irq(&ctx->lock);
1301}
1302
1303/*
1304 * Cross CPU call to disable a performance event
1305 */
1306int __perf_event_disable(void *info)
1307{
1308        struct perf_event *event = info;
1309        struct perf_event_context *ctx = event->ctx;
1310        struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1311
1312        /*
 1313         * If this is a per-task event, we need to check whether this
1314         * event's task is the current task on this cpu.
1315         *
1316         * Can trigger due to concurrent perf_event_context_sched_out()
1317         * flipping contexts around.
1318         */
1319        if (ctx->task && cpuctx->task_ctx != ctx)
1320                return -EINVAL;
1321
1322        raw_spin_lock(&ctx->lock);
1323
1324        /*
1325         * If the event is on, turn it off.
1326         * If it is in error state, leave it in error state.
1327         */
1328        if (event->state >= PERF_EVENT_STATE_INACTIVE) {
1329                update_context_time(ctx);
1330                update_cgrp_time_from_event(event);
1331                update_group_times(event);
1332                if (event == event->group_leader)
1333                        group_sched_out(event, cpuctx, ctx);
1334                else
1335                        event_sched_out(event, cpuctx, ctx);
1336                event->state = PERF_EVENT_STATE_OFF;
1337        }
1338
1339        raw_spin_unlock(&ctx->lock);
1340
1341        return 0;
1342}
1343
1344/*
 1345 * Disable an event.
1346 *
1347 * If event->ctx is a cloned context, callers must make sure that
1348 * every task struct that event->ctx->task could possibly point to
 1349 * remains valid.  This condition is satisfied when called through
1350 * perf_event_for_each_child or perf_event_for_each because they
1351 * hold the top-level event's child_mutex, so any descendant that
1352 * goes to exit will block in sync_child_event.
1353 * When called from perf_pending_event it's OK because event->ctx
1354 * is the current context on this CPU and preemption is disabled,
1355 * hence we can't get into perf_event_task_sched_out for this context.
1356 */
1357void perf_event_disable(struct perf_event *event)
1358{
1359        struct perf_event_context *ctx = event->ctx;
1360        struct task_struct *task = ctx->task;
1361
1362        if (!task) {
1363                /*
1364                 * Disable the event on the cpu that it's on
1365                 */
1366                cpu_function_call(event->cpu, __perf_event_disable, event);
1367                return;
1368        }
1369
1370retry:
1371        if (!task_function_call(task, __perf_event_disable, event))
1372                return;
1373
1374        raw_spin_lock_irq(&ctx->lock);
1375        /*
1376         * If the event is still active, we need to retry the cross-call.
1377         */
1378        if (event->state == PERF_EVENT_STATE_ACTIVE) {
1379                raw_spin_unlock_irq(&ctx->lock);
1380                /*
1381                 * Reload the task pointer, it might have been changed by
1382                 * a concurrent perf_event_context_sched_out().
1383                 */
1384                task = ctx->task;
1385                goto retry;
1386        }
1387
1388        /*
1389         * Since we have the lock this context can't be scheduled
1390         * in, so we can change the state safely.
1391         */
1392        if (event->state == PERF_EVENT_STATE_INACTIVE) {
1393                update_group_times(event);
1394                event->state = PERF_EVENT_STATE_OFF;
1395        }
1396        raw_spin_unlock_irq(&ctx->lock);
1397}
1398EXPORT_SYMBOL_GPL(perf_event_disable);
1399
1400static void perf_set_shadow_time(struct perf_event *event,
1401                                 struct perf_event_context *ctx,
1402                                 u64 tstamp)
1403{
1404        /*
1405         * use the correct time source for the time snapshot
1406         *
1407         * We could get by without this by leveraging the
1408         * fact that to get to this function, the caller
1409         * has most likely already called update_context_time()
1410         * and update_cgrp_time_xx() and thus both timestamp
 1411         * are identical (or very close). Given that tstamp is
1412         * already adjusted for cgroup, we could say that:
1413         *    tstamp - ctx->timestamp
1414         * is equivalent to
1415         *    tstamp - cgrp->timestamp.
1416         *
1417         * Then, in perf_output_read(), the calculation would
1418         * work with no changes because:
1419         * - event is guaranteed scheduled in
1420         * - no scheduled out in between
1421         * - thus the timestamp would be the same
1422         *
1423         * But this is a bit hairy.
1424         *
1425         * So instead, we have an explicit cgroup call to remain
 1426         * within the same time source all along. We believe it
1427         * is cleaner and simpler to understand.
1428         */
1429        if (is_cgroup_event(event))
1430                perf_cgroup_set_shadow_time(event, tstamp);
1431        else
1432                event->shadow_ctx_time = tstamp - ctx->timestamp;
1433}
1434
1435#define MAX_INTERRUPTS (~0ULL)
1436
1437static void perf_log_throttle(struct perf_event *event, int enable);
1438
1439static int
1440event_sched_in(struct perf_event *event,
1441                 struct perf_cpu_context *cpuctx,
1442                 struct perf_event_context *ctx)
1443{
1444        u64 tstamp = perf_event_time(event);
1445
1446        if (event->state <= PERF_EVENT_STATE_OFF)
1447                return 0;
1448
1449        event->state = PERF_EVENT_STATE_ACTIVE;
1450        event->oncpu = smp_processor_id();
1451
1452        /*
 1453         * Unthrottle events: since we were just scheduled we might have missed
 1454         * several ticks already, and for a heavily scheduling task there is little
1455         * guarantee it'll get a tick in a timely manner.
1456         */
1457        if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
1458                perf_log_throttle(event, 1);
1459                event->hw.interrupts = 0;
1460        }
1461
1462        /*
1463         * The new state must be visible before we turn it on in the hardware:
1464         */
1465        smp_wmb();
1466
1467        if (event->pmu->add(event, PERF_EF_START)) {
1468                event->state = PERF_EVENT_STATE_INACTIVE;
1469                event->oncpu = -1;
1470                return -EAGAIN;
1471        }
1472
1473        event->tstamp_running += tstamp - event->tstamp_stopped;
1474
1475        perf_set_shadow_time(event, ctx, tstamp);
1476
1477        if (!is_software_event(event))
1478                cpuctx->active_oncpu++;
1479        ctx->nr_active++;
1480        if (event->attr.freq && event->attr.sample_freq)
1481                ctx->nr_freq++;
1482
1483        if (event->attr.exclusive)
1484                cpuctx->exclusive = 1;
1485
1486        return 0;
1487}
1488
1489static int
1490group_sched_in(struct perf_event *group_event,
1491               struct perf_cpu_context *cpuctx,
1492               struct perf_event_context *ctx)
1493{
1494        struct perf_event *event, *partial_group = NULL;
1495        struct pmu *pmu = group_event->pmu;
1496        u64 now = ctx->time;
1497        bool simulate = false;
1498
1499        if (group_event->state == PERF_EVENT_STATE_OFF)
1500                return 0;
1501
1502        pmu->start_txn(pmu);
1503
1504        if (event_sched_in(group_event, cpuctx, ctx)) {
1505                pmu->cancel_txn(pmu);
1506                return -EAGAIN;
1507        }
1508
1509        /*
1510         * Schedule in siblings as one group (if any):
1511         */
1512        list_for_each_entry(event, &group_event->sibling_list, group_entry) {
1513                if (event_sched_in(event, cpuctx, ctx)) {
1514                        partial_group = event;
1515                        goto group_error;
1516                }
1517        }
1518
1519        if (!pmu->commit_txn(pmu))
1520                return 0;
1521
1522group_error:
1523        /*
1524         * Groups can be scheduled in as one unit only, so undo any
1525         * partial group before returning:
1526         * The events up to the failed event are scheduled out normally,
1527         * tstamp_stopped will be updated.
1528         *
1529         * The failed events and the remaining siblings need to have
 1530         * their timings updated as if they had gone through event_sched_in()
1531         * and event_sched_out(). This is required to get consistent timings
1532         * across the group. This also takes care of the case where the group
1533         * could never be scheduled by ensuring tstamp_stopped is set to mark
1534         * the time the event was actually stopped, such that time delta
1535         * calculation in update_event_times() is correct.
1536         */
1537        list_for_each_entry(event, &group_event->sibling_list, group_entry) {
1538                if (event == partial_group)
1539                        simulate = true;
1540
1541                if (simulate) {
1542                        event->tstamp_running += now - event->tstamp_stopped;
1543                        event->tstamp_stopped = now;
1544                } else {
1545                        event_sched_out(event, cpuctx, ctx);
1546                }
1547        }
1548        event_sched_out(group_event, cpuctx, ctx);
1549
1550        pmu->cancel_txn(pmu);
1551
1552        return -EAGAIN;
1553}
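
/*
 * Note: the start_txn()/commit_txn()/cancel_txn() sequence above lets a
 * hardware PMU driver batch the per-event add() calls and validate the
 * whole group's resource constraints in one go; on any failure the
 * group is unwound and -EAGAIN tells the caller it cannot be scheduled
 * right now.
 */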
1554
1555/*
1556 * Work out whether we can put this event group on the CPU now.
1557 */
1558static int group_can_go_on(struct perf_event *event,
1559                           struct perf_cpu_context *cpuctx,
1560                           int can_add_hw)
1561{
1562        /*
1563         * Groups consisting entirely of software events can always go on.
1564         */
1565        if (event->group_flags & PERF_GROUP_SOFTWARE)
1566                return 1;
1567        /*
1568         * If an exclusive group is already on, no other hardware
1569         * events can go on.
1570         */
1571        if (cpuctx->exclusive)
1572                return 0;
1573        /*
1574         * If this group is exclusive and there are already
1575         * events on the CPU, it can't go on.
1576         */
1577        if (event->attr.exclusive && cpuctx->active_oncpu)
1578                return 0;
1579        /*
1580         * Otherwise, try to add it if all previous groups were able
1581         * to go on.
1582         */
1583        return can_add_hw;
1584}
1585
1586static void add_event_to_ctx(struct perf_event *event,
1587                               struct perf_event_context *ctx)
1588{
1589        u64 tstamp = perf_event_time(event);
1590
1591        list_add_event(event, ctx);
1592        perf_group_attach(event);
1593        event->tstamp_enabled = tstamp;
1594        event->tstamp_running = tstamp;
1595        event->tstamp_stopped = tstamp;
1596}
1597
1598static void task_ctx_sched_out(struct perf_event_context *ctx);
1599static void
1600ctx_sched_in(struct perf_event_context *ctx,
1601             struct perf_cpu_context *cpuctx,
1602             enum event_type_t event_type,
1603             struct task_struct *task);
1604
1605static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
1606                                struct perf_event_context *ctx,
1607                                struct task_struct *task)
1608{
1609        cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
1610        if (ctx)
1611                ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
1612        cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
1613        if (ctx)
1614                ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
1615}
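
/*
 * Note: pinned groups (CPU context first, then task context) are
 * scheduled in before any flexible groups, so pinned events get first
 * claim on the hardware counters.
 */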
1616
1617/*
1618 * Cross CPU call to install and enable a performance event
1619 *
1620 * Must be called with ctx->mutex held
1621 */
1622static int  __perf_install_in_context(void *info)
1623{
1624        struct perf_event *event = info;
1625        struct perf_event_context *ctx = event->ctx;
1626        struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1627        struct perf_event_context *task_ctx = cpuctx->task_ctx;
1628        struct task_struct *task = current;
1629
1630        perf_ctx_lock(cpuctx, task_ctx);
1631        perf_pmu_disable(cpuctx->ctx.pmu);
1632
1633        /*
1634         * If there was an active task_ctx schedule it out.
1635         */
1636        if (task_ctx)
1637                task_ctx_sched_out(task_ctx);
1638
1639        /*
1640         * If the context we're installing events in is not the
1641         * active task_ctx, flip them.
1642         */
1643        if (ctx->task && task_ctx != ctx) {
1644                if (task_ctx)
1645                        raw_spin_unlock(&task_ctx->lock);
1646                raw_spin_lock(&ctx->lock);
1647                task_ctx = ctx;
1648        }
1649
1650        if (task_ctx) {
1651                cpuctx->task_ctx = task_ctx;
1652                task = task_ctx->task;
1653        }
1654
1655        cpu_ctx_sched_out(cpuctx, EVENT_ALL);
1656
1657        update_context_time(ctx);
1658        /*
1659         * update cgrp time only if current cgrp
1660         * matches event->cgrp. Must be done before
1661         * calling add_event_to_ctx()
1662         */
1663        update_cgrp_time_from_event(event);
1664
1665        add_event_to_ctx(event, ctx);
1666
1667        /*
1668         * Schedule everything back in
1669         */
1670        perf_event_sched_in(cpuctx, task_ctx, task);
1671
1672        perf_pmu_enable(cpuctx->ctx.pmu);
1673        perf_ctx_unlock(cpuctx, task_ctx);
1674
1675        return 0;
1676}
1677
1678/*
1679 * Attach a performance event to a context
1680 *
1681 * First we add the event to the list with the hardware enable bit
1682 * in event->hw_config cleared.
1683 *
1684 * If the event is attached to a task which is on a CPU, we use an smp
1685 * call to enable it in the task context. The task might have been
1686 * scheduled away, but we check this in the smp call again.
1687 */
1688static void
1689perf_install_in_context(struct perf_event_context *ctx,
1690                        struct perf_event *event,
1691                        int cpu)
1692{
1693        struct task_struct *task = ctx->task;
1694
1695        lockdep_assert_held(&ctx->mutex);
1696
1697        event->ctx = ctx;
1698        if (event->cpu != -1)
1699                event->cpu = cpu;
1700
1701        if (!task) {
1702                /*
1703                 * Per cpu events are installed via an smp call and
1704                 * the install is always successful.
1705                 */
1706                cpu_function_call(cpu, __perf_install_in_context, event);
1707                return;
1708        }
1709
1710retry:
1711        if (!task_function_call(task, __perf_install_in_context, event))
1712                return;
1713
1714        raw_spin_lock_irq(&ctx->lock);
1715        /*
1716         * If we failed to find a running task, but find the context active now
1717         * that we've acquired the ctx->lock, retry.
1718         */
1719        if (ctx->is_active) {
1720                raw_spin_unlock_irq(&ctx->lock);
1721                goto retry;
1722        }
1723
1724        /*
1725         * Since the task isn't running, it's safe to add the event; holding
1726         * the ctx->lock ensures the task won't get scheduled in.
1727         */
1728        add_event_to_ctx(event, ctx);
1729        raw_spin_unlock_irq(&ctx->lock);
1730}
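
/*
 * Illustrative sketch (simplified, error handling omitted): the usual
 * caller of perf_install_in_context() is the perf_event_open() syscall
 * path, which roughly does:
 *
 *	ctx = find_get_context(pmu, task, event->cpu);
 *	mutex_lock(&ctx->mutex);
 *	perf_install_in_context(ctx, event, event->cpu);
 *	perf_unpin_context(ctx);
 *	mutex_unlock(&ctx->mutex);
 */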
1731
1732/*
1733 * Put an event into inactive state and update time fields.
1734 * Enabling the leader of a group effectively enables all
1735 * the group members that aren't explicitly disabled, so we
1736 * have to update their ->tstamp_enabled also.
1737 * Note: this works for group members as well as group leaders
1738 * since the non-leader members' sibling_lists will be empty.
1739 */
1740static void __perf_event_mark_enabled(struct perf_event *event)
1741{
1742        struct perf_event *sub;
1743        u64 tstamp = perf_event_time(event);
1744
1745        event->state = PERF_EVENT_STATE_INACTIVE;
1746        event->tstamp_enabled = tstamp - event->total_time_enabled;
1747        list_for_each_entry(sub, &event->sibling_list, group_entry) {
1748                if (sub->state >= PERF_EVENT_STATE_INACTIVE)
1749                        sub->tstamp_enabled = tstamp - sub->total_time_enabled;
1750        }
1751}
1752
1753/*
1754 * Cross CPU call to enable a performance event
1755 */
1756static int __perf_event_enable(void *info)
1757{
1758        struct perf_event *event = info;
1759        struct perf_event_context *ctx = event->ctx;
1760        struct perf_event *leader = event->group_leader;
1761        struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
1762        int err;
1763
1764        if (WARN_ON_ONCE(!ctx->is_active))
1765                return -EINVAL;
1766
1767        raw_spin_lock(&ctx->lock);
1768        update_context_time(ctx);
1769
1770        if (event->state >= PERF_EVENT_STATE_INACTIVE)
1771                goto unlock;
1772
1773        /*
1774         * set current task's cgroup time reference point
1775         */
1776        perf_cgroup_set_timestamp(current, ctx);
1777
1778        __perf_event_mark_enabled(event);
1779
1780        if (!event_filter_match(event)) {
1781                if (is_cgroup_event(event))
1782                        perf_cgroup_defer_enabled(event);
1783                goto unlock;
1784        }
1785
1786        /*
1787         * If the event is in a group and isn't the group leader,
1788         * then don't put it on unless the group is on.
1789         */
1790        if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
1791                goto unlock;
1792
1793        if (!group_can_go_on(event, cpuctx, 1)) {
1794                err = -EEXIST;
1795        } else {
1796                if (event == leader)
1797                        err = group_sched_in(event, cpuctx, ctx);
1798                else
1799                        err = event_sched_in(event, cpuctx, ctx);
1800        }
1801
1802        if (err) {
1803                /*
1804                 * If this event can't go on and it's part of a
1805                 * group, then the whole group has to come off.
1806                 */
1807                if (leader != event)
1808                        group_sched_out(leader, cpuctx, ctx);
1809                if (leader->attr.pinned) {
1810                        update_group_times(leader);
1811                        leader->state = PERF_EVENT_STATE_ERROR;
1812                }
1813        }
1814
1815unlock:
1816        raw_spin_unlock(&ctx->lock);
1817
1818        return 0;
1819}
1820
1821/*
1822 * Enable an event.
1823 *
1824 * If event->ctx is a cloned context, callers must make sure that
1825 * every task struct that event->ctx->task could possibly point to
1826 * remains valid.  This condition is satisfied when called through
1827 * perf_event_for_each_child or perf_event_for_each as described
1828 * for perf_event_disable.
1829 */
1830void perf_event_enable(struct perf_event *event)
1831{
1832        struct perf_event_context *ctx = event->ctx;
1833        struct task_struct *task = ctx->task;
1834
1835        if (!task) {
1836                /*
1837                 * Enable the event on the cpu that it's on
1838                 */
1839                cpu_function_call(event->cpu, __perf_event_enable, event);
1840                return;
1841        }
1842
1843        raw_spin_lock_irq(&ctx->lock);
1844        if (event->state >= PERF_EVENT_STATE_INACTIVE)
1845                goto out;
1846
1847        /*
1848         * If the event is in error state, clear that first.
1849         * That way, if we see the event in error state below, we
1850         * know that it has gone back into error state, as distinct
1851         * from the task having been scheduled away before the
1852         * cross-call arrived.
1853         */
1854        if (event->state == PERF_EVENT_STATE_ERROR)
1855                event->state = PERF_EVENT_STATE_OFF;
1856
1857retry:
1858        if (!ctx->is_active) {
1859                __perf_event_mark_enabled(event);
1860                goto out;
1861        }
1862
1863        raw_spin_unlock_irq(&ctx->lock);
1864
1865        if (!task_function_call(task, __perf_event_enable, event))
1866                return;
1867
1868        raw_spin_lock_irq(&ctx->lock);
1869
1870        /*
1871         * If the context is active and the event is still off,
1872         * we need to retry the cross-call.
1873         */
1874        if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) {
1875                /*
1876                 * task could have been flipped by a concurrent
1877                 * perf_event_context_sched_out()
1878                 */
1879                task = ctx->task;
1880                goto retry;
1881        }
1882
1883out:
1884        raw_spin_unlock_irq(&ctx->lock);
1885}
1886EXPORT_SYMBOL_GPL(perf_event_enable);
1887
1888int perf_event_refresh(struct perf_event *event, int refresh)
1889{
1890        /*
1891         * not supported on inherited events
1892         */
1893        if (event->attr.inherit || !is_sampling_event(event))
1894                return -EINVAL;
1895
1896        atomic_add(refresh, &event->event_limit);
1897        perf_event_enable(event);
1898
1899        return 0;
1900}
1901EXPORT_SYMBOL_GPL(perf_event_refresh);
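
/*
 * Illustrative sketch (simplified): from userspace the refresh count is
 * normally driven through the PERF_EVENT_IOC_REFRESH ioctl on a
 * perf_event_open() file descriptor (perf_fd below is a placeholder
 * name), which lands in perf_event_refresh() above. Arming a sampling
 * event for four more overflows and waiting for the wakeup could look
 * like:
 *
 *	struct pollfd pfd = { .fd = perf_fd, .events = POLLIN };
 *
 *	ioctl(perf_fd, PERF_EVENT_IOC_REFRESH, 4);
 *	poll(&pfd, 1, -1);
 */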
1902
1903static void ctx_sched_out(struct perf_event_context *ctx,
1904                          struct perf_cpu_context *cpuctx,
1905                          enum event_type_t event_type)
1906{
1907        struct perf_event *event;
1908        int is_active = ctx->is_active;
1909
1910        ctx->is_active &= ~event_type;
1911        if (likely(!ctx->nr_events))
1912                return;
1913
1914        update_context_time(ctx);
1915        update_cgrp_time_from_cpuctx(cpuctx);
1916        if (!ctx->nr_active)
1917                return;
1918
1919        perf_pmu_disable(ctx->pmu);
1920        if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) {
1921                list_for_each_entry(event, &ctx->pinned_groups, group_entry)
1922                        group_sched_out(event, cpuctx, ctx);
1923        }
1924
1925        if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) {
1926                list_for_each_entry(event, &ctx->flexible_groups, group_entry)
1927                        group_sched_out(event, cpuctx, ctx);
1928        }
1929        perf_pmu_enable(ctx->pmu);
1930}
1931
1932/*
1933 * Test whether two contexts are equivalent, i.e. whether they
1934 * have both been cloned from the same version of the same context
1935 * and they both have the same number of enabled events.
1936 * If the number of enabled events is the same, then the set
1937 * of enabled events should be the same, because these are both
1938 * inherited contexts, therefore we can't access individual events
1939 * in them directly with an fd; we can only enable/disable all
1940 * events via prctl, or enable/disable all events in a family
1941 * via ioctl, which will have the same effect on both contexts.
1942 */
1943static int context_equiv(struct perf_event_context *ctx1,
1944                         struct perf_event_context *ctx2)
1945{
1946        return ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx
1947                && ctx1->parent_gen == ctx2->parent_gen
1948                && !ctx1->pin_count && !ctx2->pin_count;
1949}
1950
1951static void __perf_event_sync_stat(struct perf_event *event,
1952                                     struct perf_event *next_event)
1953{
1954        u64 value;
1955
1956        if (!event->attr.inherit_stat)
1957                return;
1958
1959        /*
1960         * Update the event value, we cannot use perf_event_read()
1961         * because we're in the middle of a context switch and have IRQs
1962         * disabled, which upsets smp_call_function_single(), however
1963         * we know the event must be on the current CPU, therefore we
1964         * don't need to use it.
1965         */
1966        switch (event->state) {
1967        case PERF_EVENT_STATE_ACTIVE:
1968                event->pmu->read(event);
1969                /* fall-through */
1970
1971        case PERF_EVENT_STATE_INACTIVE:
1972                update_event_times(event);
1973                break;
1974
1975        default:
1976                break;
1977        }
1978
1979        /*
1980         * In order to keep per-task stats reliable we need to flip the event
1981         * values when we flip the contexts.
1982         */
1983        value = local64_read(&next_event->count);
1984        value = local64_xchg(&event->count, value);
1985        local64_set(&next_event->count, value);
1986
1987        swap(event->total_time_enabled, next_event->total_time_enabled);
1988        swap(event->total_time_running, next_event->total_time_running);
1989
1990        /*
1991         * Since we swizzled the values, update the user visible data too.
1992         */
1993        perf_event_update_userpage(event);
1994        perf_event_update_userpage(next_event);
1995}
1996
1997#define list_next_entry(pos, member) \
1998        list_entry(pos->member.next, typeof(*pos), member)
1999
2000static void perf_event_sync_stat(struct perf_event_context *ctx,
2001                                   struct perf_event_context *next_ctx)
2002{
2003        struct perf_event *event, *next_event;
2004
2005        if (!ctx->nr_stat)
2006                return;
2007
2008        update_context_time(ctx);
2009
2010        event = list_first_entry(&ctx->event_list,
2011                                   struct perf_event, event_entry);
2012
2013        next_event = list_first_entry(&next_ctx->event_list,
2014                                        struct perf_event, event_entry);
2015
2016        while (&event->event_entry != &ctx->event_list &&
2017               &next_event->event_entry != &next_ctx->event_list) {
2018
2019                __perf_event_sync_stat(event, next_event);
2020
2021                event = list_next_entry(event, event_entry);
2022                next_event = list_next_entry(next_event, event_entry);
2023        }
2024}
2025
2026static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
2027                                         struct task_struct *next)
2028{
2029        struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
2030        struct perf_event_context *next_ctx;
2031        struct perf_event_context *parent;
2032        struct perf_cpu_context *cpuctx;
2033        int do_switch = 1;
2034
2035        if (likely(!ctx))
2036                return;
2037
2038        cpuctx = __get_cpu_context(ctx);
2039        if (!cpuctx->task_ctx)
2040                return;
2041
2042        rcu_read_lock();
2043        parent = rcu_dereference(ctx->parent_ctx);
2044        next_ctx = next->perf_event_ctxp[ctxn];
2045        if (parent && next_ctx &&
2046            rcu_dereference(next_ctx->parent_ctx) == parent) {
2047                /*
2048                 * Looks like the two contexts are clones, so we might be
2049                 * able to optimize the context switch.  We lock both
2050                 * contexts and check that they are clones under the
2051                 * lock (including re-checking that neither has been
2052                 * uncloned in the meantime).  It doesn't matter which
2053                 * order we take the locks because no other cpu could
2054                 * be trying to lock both of these tasks.
2055                 */
2056                raw_spin_lock(&ctx->lock);
2057                raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
2058                if (context_equiv(ctx, next_ctx)) {
2059                        /*
2060                         * XXX do we need a memory barrier of sorts
2061                         * wrt rcu_dereference() of perf_event_ctxp
2062                         */
2063                        task->perf_event_ctxp[ctxn] = next_ctx;
2064                        next->perf_event_ctxp[ctxn] = ctx;
2065                        ctx->task = next;
2066                        next_ctx->task = task;
2067                        do_switch = 0;
2068
2069                        perf_event_sync_stat(ctx, next_ctx);
2070                }
2071                raw_spin_unlock(&next_ctx->lock);
2072                raw_spin_unlock(&ctx->lock);
2073        }
2074        rcu_read_unlock();
2075
2076        if (do_switch) {
2077                raw_spin_lock(&ctx->lock);
2078                ctx_sched_out(ctx, cpuctx, EVENT_ALL);
2079                cpuctx->task_ctx = NULL;
2080                raw_spin_unlock(&ctx->lock);
2081        }
2082}
2083
2084#define for_each_task_context_nr(ctxn)                                  \
2085        for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
2086
2087/*
2088 * Called from scheduler to remove the events of the current task,
2089 * with interrupts disabled.
2090 *
2091 * We stop each event and update the event value in event->count.
2092 *
2093 * This does not protect us against NMI, but disable()
2094 * sets the disabled bit in the control field of event _before_
2095 * accessing the event control register. If an NMI hits, then it will
2096 * not restart the event.
2097 */
2098void __perf_event_task_sched_out(struct task_struct *task,
2099                                 struct task_struct *next)
2100{
2101        int ctxn;
2102
2103        for_each_task_context_nr(ctxn)
2104                perf_event_context_sched_out(task, ctxn, next);
2105
2106        /*
2107         * if cgroup events exist on this CPU, then we need
2108         * to check if we have to switch out PMU state.
2109         * cgroup events are system-wide mode only
2110         */
2111        if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
2112                perf_cgroup_sched_out(task, next);
2113}
2114
2115static void task_ctx_sched_out(struct perf_event_context *ctx)
2116{
2117        struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2118
2119        if (!cpuctx->task_ctx)
2120                return;
2121
2122        if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
2123                return;
2124
2125        ctx_sched_out(ctx, cpuctx, EVENT_ALL);
2126        cpuctx->task_ctx = NULL;
2127}
2128
2129/*
2130 * Called with IRQs disabled
2131 */
2132static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
2133                              enum event_type_t event_type)
2134{
2135        ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
2136}
2137
2138static void
2139ctx_pinned_sched_in(struct perf_event_context *ctx,
2140                    struct perf_cpu_context *cpuctx)
2141{
2142        struct perf_event *event;
2143
2144        list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
2145                if (event->state <= PERF_EVENT_STATE_OFF)
2146                        continue;
2147                if (!event_filter_match(event))
2148                        continue;
2149
2150                /* may need to reset tstamp_enabled */
2151                if (is_cgroup_event(event))
2152                        perf_cgroup_mark_enabled(event, ctx);
2153
2154                if (group_can_go_on(event, cpuctx, 1))
2155                        group_sched_in(event, cpuctx, ctx);
2156
2157                /*
2158                 * If this pinned group hasn't been scheduled,
2159                 * put it in error state.
2160                 */
2161                if (event->state == PERF_EVENT_STATE_INACTIVE) {
2162                        update_group_times(event);
2163                        event->state = PERF_EVENT_STATE_ERROR;
2164                }
2165        }
2166}
2167
2168static void
2169ctx_flexible_sched_in(struct perf_event_context *ctx,
2170                      struct perf_cpu_context *cpuctx)
2171{
2172        struct perf_event *event;
2173        int can_add_hw = 1;
2174
2175        list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
2176                /* Ignore events in OFF or ERROR state */
2177                if (event->state <= PERF_EVENT_STATE_OFF)
2178                        continue;
2179                /*
2180                 * Listen to the 'cpu' scheduling filter constraint
2181                 * of events:
2182                 */
2183                if (!event_filter_match(event))
2184                        continue;
2185
2186                /* may need to reset tstamp_enabled */
2187                if (is_cgroup_event(event))
2188                        perf_cgroup_mark_enabled(event, ctx);
2189
2190                if (group_can_go_on(event, cpuctx, can_add_hw)) {
2191                        if (group_sched_in(event, cpuctx, ctx))
2192                                can_add_hw = 0;
2193                }
2194        }
2195}
2196
2197static void
2198ctx_sched_in(struct perf_event_context *ctx,
2199             struct perf_cpu_context *cpuctx,
2200             enum event_type_t event_type,
2201             struct task_struct *task)
2202{
2203        u64 now;
2204        int is_active = ctx->is_active;
2205
2206        ctx->is_active |= event_type;
2207        if (likely(!ctx->nr_events))
2208                return;
2209
2210        now = perf_clock();
2211        ctx->timestamp = now;
2212        perf_cgroup_set_timestamp(task, ctx);
2213        /*
2214         * First go through the list and put on any pinned groups
2215         * in order to give them the best chance of going on.
2216         */
2217        if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
2218                ctx_pinned_sched_in(ctx, cpuctx);
2219
2220        /* Then walk through the lower prio flexible groups */
2221        if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
2222                ctx_flexible_sched_in(ctx, cpuctx);
2223}
2224
2225static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
2226                             enum event_type_t event_type,
2227                             struct task_struct *task)
2228{
2229        struct perf_event_context *ctx = &cpuctx->ctx;
2230
2231        ctx_sched_in(ctx, cpuctx, event_type, task);
2232}
2233
2234static void perf_event_context_sched_in(struct perf_event_context *ctx,
2235                                        struct task_struct *task)
2236{
2237        struct perf_cpu_context *cpuctx;
2238
2239        cpuctx = __get_cpu_context(ctx);
2240        if (cpuctx->task_ctx == ctx)
2241                return;
2242
2243        perf_ctx_lock(cpuctx, ctx);
2244        perf_pmu_disable(ctx->pmu);
2245        /*
2246         * We want to keep the following priority order:
2247         * cpu pinned (that don't need to move), task pinned,
2248         * cpu flexible, task flexible.
2249         */
2250        cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2251
2252        if (ctx->nr_events)
2253                cpuctx->task_ctx = ctx;
2254
2255        perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);
2256
2257        perf_pmu_enable(ctx->pmu);
2258        perf_ctx_unlock(cpuctx, ctx);
2259
2260        /*
2261         * Since these rotations are per-cpu, we need to ensure the
2262         * cpu-context we got scheduled on is actually rotating.
2263         */
2264        perf_pmu_rotate_start(ctx->pmu);
2265}
2266
2267/*
2268 * When sampling the branch stack in system-wide mode, it may be necessary
2269 * to flush the stack on context switch. This happens when the branch
2270 * stack does not tag its entries with the pid of the current task.
2271 * Otherwise it becomes impossible to associate a branch entry with a
2272 * task. This ambiguity is more likely to appear when the branch stack
2273 * supports priv level filtering and the user sets it to monitor only
2274 * at the user level (which could be a useful measurement in system-wide
2275 * mode). In that case, the risk is high of having a branch stack with
2276 * branch from multiple tasks. Flushing may mean dropping the existing
2277 * entries or stashing them somewhere in the PMU specific code layer.
2278 *
2279 * This function provides the context switch callback to the lower code
2280 * layer. It is invoked ONLY when there is at least one system-wide context
2281 * with at least one active event using taken branch sampling.
2282 */
2283static void perf_branch_stack_sched_in(struct task_struct *prev,
2284                                       struct task_struct *task)
2285{
2286        struct perf_cpu_context *cpuctx;
2287        struct pmu *pmu;
2288        unsigned long flags;
2289
2290        /* no need to flush branch stack if not changing task */
2291        if (prev == task)
2292                return;
2293
2294        local_irq_save(flags);
2295
2296        rcu_read_lock();
2297
2298        list_for_each_entry_rcu(pmu, &pmus, entry) {
2299                cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
2300
2301                /*
2302                 * check if the context has at least one
2303                 * event using PERF_SAMPLE_BRANCH_STACK
2304                 */
2305                if (cpuctx->ctx.nr_branch_stack > 0
2306                    && pmu->flush_branch_stack) {
2307
2308                        pmu = cpuctx->ctx.pmu;
2309
2310                        perf_ctx_lock(cpuctx, cpuctx->task_ctx);
2311
2312                        perf_pmu_disable(pmu);
2313
2314                        pmu->flush_branch_stack();
2315
2316                        perf_pmu_enable(pmu);
2317
2318                        perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
2319                }
2320        }
2321
2322        rcu_read_unlock();
2323
2324        local_irq_restore(flags);
2325}
2326
2327/*
2328 * Called from scheduler to add the events of the current task
2329 * with interrupts disabled.
2330 *
2331 * We restore the event value and then enable it.
2332 *
2333 * This does not protect us against NMI, but enable()
2334 * sets the enabled bit in the control field of event _before_
2335 * accessing the event control register. If an NMI hits, then it will
2336 * keep the event running.
2337 */
2338void __perf_event_task_sched_in(struct task_struct *prev,
2339                                struct task_struct *task)
2340{
2341        struct perf_event_context *ctx;
2342        int ctxn;
2343
2344        for_each_task_context_nr(ctxn) {
2345                ctx = task->perf_event_ctxp[ctxn];
2346                if (likely(!ctx))
2347                        continue;
2348
2349                perf_event_context_sched_in(ctx, task);
2350        }
2351        /*
2352         * if cgroup events exist on this CPU, then we need
2353         * to check if we have to switch in PMU state.
2354         * cgroup events are system-wide mode only
2355         */
2356        if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
2357                perf_cgroup_sched_in(prev, task);
2358
2359        /* check for system-wide branch_stack events */
2360        if (atomic_read(&__get_cpu_var(perf_branch_stack_events)))
2361                perf_branch_stack_sched_in(prev, task);
2362}
2363
2364static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
2365{
2366        u64 frequency = event->attr.sample_freq;
2367        u64 sec = NSEC_PER_SEC;
2368        u64 divisor, dividend;
2369
2370        int count_fls, nsec_fls, frequency_fls, sec_fls;
2371
2372        count_fls = fls64(count);
2373        nsec_fls = fls64(nsec);
2374        frequency_fls = fls64(frequency);
2375        sec_fls = 30;
2376
2377        /*
2378         * We got @count in @nsec, with a target of sample_freq HZ
2379         * the target period becomes:
2380         *
2381         *             @count * 10^9
2382         * period = -------------------
2383         *          @nsec * sample_freq
2384         *
2385         */
2386
2387        /*
2388         * Reduce accuracy by one bit such that @a and @b converge
2389         * to a similar magnitude.
2390         */
2391#define REDUCE_FLS(a, b)                \
2392do {                                    \
2393        if (a##_fls > b##_fls) {        \
2394                a >>= 1;                \
2395                a##_fls--;              \
2396        } else {                        \
2397                b >>= 1;                \
2398                b##_fls--;              \
2399        }                               \
2400} while (0)
2401
2402        /*
2403         * Reduce accuracy until either term fits in a u64, then proceed with
2404         * the other, so that finally we can do a u64/u64 division.
2405         */
2406        while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
2407                REDUCE_FLS(nsec, frequency);
2408                REDUCE_FLS(sec, count);
2409        }
2410
2411        if (count_fls + sec_fls > 64) {
2412                divisor = nsec * frequency;
2413
2414                while (count_fls + sec_fls > 64) {
2415                        REDUCE_FLS(count, sec);
2416                        divisor >>= 1;
2417                }
2418
2419                dividend = count * sec;
2420        } else {
2421                dividend = count * sec;
2422
2423                while (nsec_fls + frequency_fls > 64) {
2424                        REDUCE_FLS(nsec, frequency);
2425                        dividend >>= 1;
2426                }
2427
2428                divisor = nsec * frequency;
2429        }
2430
2431        if (!divisor)
2432                return dividend;
2433
2434        return div64_u64(dividend, divisor);
2435}
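
/*
 * Worked example of the formula above (numbers purely illustrative):
 * with count = 2,000,000 events observed over nsec = 4,000,000 ns and a
 * target sample_freq of 1000 Hz, the event fires at 5e8 events/sec, so
 *
 *	period = 2e6 * 1e9 / (4e6 * 1000) = 500,000 events per sample,
 *
 * which yields roughly 1000 samples per second. The REDUCE_FLS() steps
 * above only shave precision when the intermediate products would not
 * fit in 64 bits.
 */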
2436
2437static DEFINE_PER_CPU(int, perf_throttled_count);
2438static DEFINE_PER_CPU(u64, perf_throttled_seq);
2439
2440static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
2441{
2442        struct hw_perf_event *hwc = &event->hw;
2443        s64 period, sample_period;
2444        s64 delta;
2445
2446        period = perf_calculate_period(event, nsec, count);
2447
2448        delta = (s64)(period - hwc->sample_period);
2449        delta = (delta + 7) / 8; /* low pass filter */
2450
2451        sample_period = hwc->sample_period + delta;
2452
2453        if (!sample_period)
2454                sample_period = 1;
2455
2456        hwc->sample_period = sample_period;
2457
2458        if (local64_read(&hwc->period_left) > 8*sample_period) {
2459                if (disable)
2460                        event->pmu->stop(event, PERF_EF_UPDATE);
2461
2462                local64_set(&hwc->period_left, 0);
2463
2464                if (disable)
2465                        event->pmu->start(event, PERF_EF_RELOAD);
2466        }
2467}
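
/*
 * Worked example for the low-pass filter above (numbers purely
 * illustrative): if hwc->sample_period is 100,000 and
 * perf_calculate_period() now suggests 180,000, then delta = 80,000 and
 * the filtered step is (80,000 + 7) / 8 = 10,000, giving a new
 * sample_period of 110,000. Large swings are therefore applied
 * gradually over several ticks rather than all at once.
 */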
2468
2469/*
2470 * combine freq adjustment with unthrottling to avoid two passes over the
2471 * events. At the same time, make sure that having freq events does not
2472 * change the rate of unthrottling, as that would introduce bias.
2473 */
2474static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
2475                                           int needs_unthr)
2476{
2477        struct perf_event *event;
2478        struct hw_perf_event *hwc;
2479        u64 now, period = TICK_NSEC;
2480        s64 delta;
2481
2482        /*
2483         * only need to iterate over all events iff:
2484         * - the context has events in frequency mode (needs freq adjust)
2485         * - there are events to unthrottle on this cpu
2486         */
2487        if (!(ctx->nr_freq || needs_unthr))
2488                return;
2489
2490        raw_spin_lock(&ctx->lock);
2491        perf_pmu_disable(ctx->pmu);
2492
2493        list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
2494                if (event->state != PERF_EVENT_STATE_ACTIVE)
2495                        continue;
2496
2497                if (!event_filter_match(event))
2498                        continue;
2499
2500                hwc = &event->hw;
2501
2502                if (needs_unthr && hwc->interrupts == MAX_INTERRUPTS) {
2503                        hwc->interrupts = 0;
2504                        perf_log_throttle(event, 1);
2505                        event->pmu->start(event, 0);
2506                }
2507
2508                if (!event->attr.freq || !event->attr.sample_freq)
2509                        continue;
2510
2511                /*
2512                 * stop the event and update event->count
2513                 */
2514                event->pmu->stop(event, PERF_EF_UPDATE);
2515
2516                now = local64_read(&event->count);
2517                delta = now - hwc->freq_count_stamp;
2518                hwc->freq_count_stamp = now;
2519
2520                /*
2521                 * restart the event:
2522                 * reload only if the value has changed;
2523                 * we have already stopped the event, so tell
2524                 * perf_adjust_period() to avoid stopping it
2525                 * a second time.
2526                 */
2527                if (delta > 0)
2528                        perf_adjust_period(event, period, delta, false);
2529
2530                event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
2531        }
2532
2533        perf_pmu_enable(ctx->pmu);
2534        raw_spin_unlock(&ctx->lock);
2535}
2536
2537/*
2538 * Round-robin a context's events:
2539 */
2540static void rotate_ctx(struct perf_event_context *ctx)
2541{
2542        /*
2543         * Rotate the first entry of the non-pinned groups to the end of the
2544         * list. Rotation might be disabled by the inheritance code.
2545         */
2546        if (!ctx->rotate_disable)
2547                list_rotate_left(&ctx->flexible_groups);
2548}
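
/*
 * Illustrative example: with flexible groups A, B and C but only enough
 * hardware for two of them, successive rotations schedule {A,B}, then
 * {B,C}, then {C,A}, and so on, so every group gets PMU time over a
 * longer interval even though they can never all be on at once.
 */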
2549
2550/*
2551 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
2552 * because they're strictly cpu affine and rotate_start is called with IRQs
2553 * disabled, while rotate_context is called from IRQ context.
2554 */
2555static void perf_rotate_context(struct perf_cpu_context *cpuctx)
2556{
2557        struct perf_event_context *ctx = NULL;
2558        int rotate = 0, remove = 1;
2559
2560        if (cpuctx->ctx.nr_events) {
2561                remove = 0;
2562                if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
2563                        rotate = 1;
2564        }
2565
2566        ctx = cpuctx->task_ctx;
2567        if (ctx && ctx->nr_events) {
2568                remove = 0;
2569                if (ctx->nr_events != ctx->nr_active)
2570                        rotate = 1;
2571        }
2572
2573        if (!rotate)
2574                goto done;
2575
2576        perf_ctx_lock(cpuctx, cpuctx->task_ctx);
2577        perf_pmu_disable(cpuctx->ctx.pmu);
2578
2579        cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2580        if (ctx)
2581                ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
2582
2583        rotate_ctx(&cpuctx->ctx);
2584        if (ctx)
2585                rotate_ctx(ctx);
2586
2587        perf_event_sched_in(cpuctx, ctx, current);
2588
2589        perf_pmu_enable(cpuctx->ctx.pmu);
2590        perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
2591done:
2592        if (remove)
2593                list_del_init(&cpuctx->rotation_list);
2594}
2595
2596#ifdef CONFIG_NO_HZ_FULL
2597bool perf_event_can_stop_tick(void)
2598{
2599        if (list_empty(&__get_cpu_var(rotation_list)))
2600                return true;
2601        else
2602                return false;
2603}
2604#endif
2605
2606void perf_event_task_tick(void)
2607{
2608        struct list_head *head = &__get_cpu_var(rotation_list);
2609        struct perf_cpu_context *cpuctx, *tmp;
2610        struct perf_event_context *ctx;
2611        int throttled;
2612
2613        WARN_ON(!irqs_disabled());
2614
2615        __this_cpu_inc(perf_throttled_seq);
2616        throttled = __this_cpu_xchg(perf_throttled_count, 0);
2617
2618        list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
2619                ctx = &cpuctx->ctx;
2620                perf_adjust_freq_unthr_context(ctx, throttled);
2621
2622                ctx = cpuctx->task_ctx;
2623                if (ctx)
2624                        perf_adjust_freq_unthr_context(ctx, throttled);
2625
2626                if (cpuctx->jiffies_interval == 1 ||
2627                                !(jiffies % cpuctx->jiffies_interval))
2628                        perf_rotate_context(cpuctx);
2629        }
2630}
2631
2632static int event_enable_on_exec(struct perf_event *event,
2633                                struct perf_event_context *ctx)
2634{
2635        if (!event->attr.enable_on_exec)
2636                return 0;
2637
2638        event->attr.enable_on_exec = 0;
2639        if (event->state >= PERF_EVENT_STATE_INACTIVE)
2640                return 0;
2641
2642        __perf_event_mark_enabled(event);
2643
2644        return 1;
2645}
2646
2647/*
2648 * Enable all of a task's events that have been marked enable-on-exec.
2649 * This expects task == current.
2650 */
2651static void perf_event_enable_on_exec(struct perf_event_context *ctx)
2652{
2653        struct perf_event *event;
2654        unsigned long flags;
2655        int enabled = 0;
2656        int ret;
2657
2658        local_irq_save(flags);
2659        if (!ctx || !ctx->nr_events)
2660                goto out;
2661
2662        /*
2663         * We must ctxsw out cgroup events to avoid conflict
2664         * when invoking perf_event_context_sched_in() later on
2665         * in this function. Otherwise we end up trying to
2666         * ctxswin cgroup events which are already scheduled
2667         * in.
2668         */
2669        perf_cgroup_sched_out(current, NULL);
2670
2671        raw_spin_lock(&ctx->lock);
2672        task_ctx_sched_out(ctx);
2673
2674        list_for_each_entry(event, &ctx->event_list, event_entry) {
2675                ret = event_enable_on_exec(event, ctx);
2676                if (ret)
2677                        enabled = 1;
2678        }
2679
2680        /*
2681         * Unclone this context if we enabled any event.
2682         */
2683        if (enabled)
2684                unclone_ctx(ctx);
2685
2686        raw_spin_unlock(&ctx->lock);
2687
2688        /*
2689         * Also calls ctxswin for cgroup events, if any:
2690         */
2691        perf_event_context_sched_in(ctx, ctx->task);
2692out:
2693        local_irq_restore(flags);
2694}
2695
2696/*
2697 * Cross CPU call to read the hardware event
2698 */
2699static void __perf_event_read(void *info)
2700{
2701        struct perf_event *event = info;
2702        struct perf_event_context *ctx = event->ctx;
2703        struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
2704
2705        /*
2706         * If this is a task context, we need to check whether it is
2707         * the current task context of this cpu.  If not it has been
2708         * scheduled out before the smp call arrived.  In that case
2709         * event->count would have been updated to a recent sample
2710         * when the event was scheduled out.
2711         */
2712        if (ctx->task && cpuctx->task_ctx != ctx)
2713                return;
2714
2715        raw_spin_lock(&ctx->lock);
2716        if (ctx->is_active) {
2717                update_context_time(ctx);
2718                update_cgrp_time_from_event(event);
2719        }
2720        update_event_times(event);
2721        if (event->state == PERF_EVENT_STATE_ACTIVE)
2722                event->pmu->read(event);
2723        raw_spin_unlock(&ctx->lock);
2724}
2725
2726static inline u64 perf_event_count(struct perf_event *event)
2727{
2728        return local64_read(&event->count) + atomic64_read(&event->child_count);
2729}
2730
2731static u64 perf_event_read(struct perf_event *event)
2732{
2733        /*
2734         * If event is enabled and currently active on a CPU, update the
2735         * value in the event structure:
2736         */
2737        if (event->state == PERF_EVENT_STATE_ACTIVE) {
2738                smp_call_function_single(event->oncpu,
2739                                         __perf_event_read, event, 1);
2740        } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
2741                struct perf_event_context *ctx = event->ctx;
2742                unsigned long flags;
2743
2744                raw_spin_lock_irqsave(&ctx->lock, flags);
2745                /*
2746                 * may read while context is not active
2747                 * (e.g., thread is blocked), in that case
2748                 * we cannot update context time
2749                 */
2750                if (ctx->is_active) {
2751                        update_context_time(ctx);
2752                        update_cgrp_time_from_event(event);
2753                }
2754                update_event_times(event);
2755                raw_spin_unlock_irqrestore(&ctx->lock, flags);
2756        }
2757
2758        return perf_event_count(event);
2759}
2760
2761/*
2762 * Initialize the perf_event context in a task_struct:
2763 */
2764static void __perf_event_init_context(struct perf_event_context *ctx)
2765{
2766        raw_spin_lock_init(&ctx->lock);
2767        mutex_init(&ctx->mutex);
2768        INIT_LIST_HEAD(&ctx->pinned_groups);
2769        INIT_LIST_HEAD(&ctx->flexible_groups);
2770        INIT_LIST_HEAD(&ctx->event_list);
2771        atomic_set(&ctx->refcount, 1);
2772}
2773
2774static struct perf_event_context *
2775alloc_perf_context(struct pmu *pmu, struct task_struct *task)
2776{
2777        struct perf_event_context *ctx;
2778
2779        ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
2780        if (!ctx)
2781                return NULL;
2782
2783        __perf_event_init_context(ctx);
2784        if (task) {
2785                ctx->task = task;
2786                get_task_struct(task);
2787        }
2788        ctx->pmu = pmu;
2789
2790        return ctx;
2791}
2792
2793static struct task_struct *
2794find_lively_task_by_vpid(pid_t vpid)
2795{
2796        struct task_struct *task;
2797        int err;
2798
2799        rcu_read_lock();
2800        if (!vpid)
2801                task = current;
2802        else
2803                task = find_task_by_vpid(vpid);
2804        if (task)
2805                get_task_struct(task);
2806        rcu_read_unlock();
2807
2808        if (!task)
2809                return ERR_PTR(-ESRCH);
2810
2811        /* Reuse ptrace permission checks for now. */
2812        err = -EACCES;
2813        if (!ptrace_may_access(task, PTRACE_MODE_READ))
2814                goto errout;
2815
2816        return task;
2817errout:
2818        put_task_struct(task);
2819        return ERR_PTR(err);
2820
2821}
2822
2823/*
2824 * Returns a matching context with refcount and pincount.
2825 */
2826static struct perf_event_context *
2827find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
2828{
2829        struct perf_event_context *ctx;
2830        struct perf_cpu_context *cpuctx;
2831        unsigned long flags;
2832        int ctxn, err;
2833
2834        if (!task) {
2835                /* Must be root to operate on a CPU event: */
2836                if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
2837                        return ERR_PTR(-EACCES);
2838
2839                /*
2840                 * We could be clever and allow attaching an event to an
2841                 * offline CPU and activate it when the CPU comes up, but
2842                 * that's for later.
2843                 */
2844                if (!cpu_online(cpu))
2845                        return ERR_PTR(-ENODEV);
2846
2847                cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
2848                ctx = &cpuctx->ctx;
2849                get_ctx(ctx);
2850                ++ctx->pin_count;
2851
2852                return ctx;
2853        }
2854
2855        err = -EINVAL;
2856        ctxn = pmu->task_ctx_nr;
2857        if (ctxn < 0)
2858                goto errout;
2859
2860retry:
2861        ctx = perf_lock_task_context(task, ctxn, &flags);
2862        if (ctx) {
2863                unclone_ctx(ctx);
2864                ++ctx->pin_count;
2865                raw_spin_unlock_irqrestore(&ctx->lock, flags);
2866        } else {
2867                ctx = alloc_perf_context(pmu, task);
2868                err = -ENOMEM;
2869                if (!ctx)
2870                        goto errout;
2871
2872                err = 0;
2873                mutex_lock(&task->perf_event_mutex);
2874                /*
2875                 * If it has already passed perf_event_exit_task(),
2876                 * we must see PF_EXITING, since it takes this mutex too.
2877                 */
2878                if (task->flags & PF_EXITING)
2879                        err = -ESRCH;
2880                else if (task->perf_event_ctxp[ctxn])
2881                        err = -EAGAIN;
2882                else {
2883                        get_ctx(ctx);
2884                        ++ctx->pin_count;
2885                        rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
2886                }
2887                mutex_unlock(&task->perf_event_mutex);
2888
2889                if (unlikely(err)) {
2890                        put_ctx(ctx);
2891
2892                        if (err == -EAGAIN)
2893                                goto retry;
2894                        goto errout;
2895                }
2896        }
2897
2898        return ctx;
2899
2900errout:
2901        return ERR_PTR(err);
2902}
2903
2904static void perf_event_free_filter(struct perf_event *event);
2905
2906static void free_event_rcu(struct rcu_head *head)
2907{
2908        struct perf_event *event;
2909
2910        event = container_of(head, struct perf_event, rcu_head);
2911        if (event->ns)
2912                put_pid_ns(event->ns);
2913        perf_event_free_filter(event);
2914        kfree(event);
2915}
2916
2917static void ring_buffer_put(struct ring_buffer *rb);
2918static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);
2919
2920static void free_event(struct perf_event *event)
2921{
2922        irq_work_sync(&event->pending);
2923
2924        if (!event->parent) {
2925                if (event->attach_state & PERF_ATTACH_TASK)
2926                        static_key_slow_dec_deferred(&perf_sched_events);
2927                if (event->attr.mmap || event->attr.mmap_data)
2928                        atomic_dec(&nr_mmap_events);
2929                if (event->attr.comm)
2930                        atomic_dec(&nr_comm_events);
2931                if (event->attr.task)
2932                        atomic_dec(&nr_task_events);
2933                if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
2934                        put_callchain_buffers();
2935                if (is_cgroup_event(event)) {
2936                        atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
2937                        static_key_slow_dec_deferred(&perf_sched_events);
2938                }
2939
2940                if (has_branch_stack(event)) {
2941                        static_key_slow_dec_deferred(&perf_sched_events);
2942                        /* is system-wide event */
2943                        if (!(event->attach_state & PERF_ATTACH_TASK)) {
2944                                atomic_dec(&per_cpu(perf_branch_stack_events,
2945                                                    event->cpu));
2946                        }
2947                }
2948        }
2949
2950        if (event->rb) {
2951                struct ring_buffer *rb;
2952
2953                /*
2954                 * Can happen when we close an event with re-directed output.
2955                 *
2956                 * Since we have a 0 refcount, perf_mmap_close() will skip
2957                 * over us; possibly making our ring_buffer_put() the last.
2958                 */
2959                mutex_lock(&event->mmap_mutex);
2960                rb = event->rb;
2961                if (rb) {
2962                        rcu_assign_pointer(event->rb, NULL);
2963                        ring_buffer_detach(event, rb);
2964                        ring_buffer_put(rb); /* could be last */
2965                }
2966                mutex_unlock(&event->mmap_mutex);
2967        }
2968
2969        if (is_cgroup_event(event))
2970                perf_detach_cgroup(event);
2971
2972        if (event->destroy)
2973                event->destroy(event);
2974
2975        if (event->ctx)
2976                put_ctx(event->ctx);
2977
2978        call_rcu(&event->rcu_head, free_event_rcu);
2979}
2980
2981int perf_event_release_kernel(struct perf_event *event)
2982{
2983        struct perf_event_context *ctx = event->ctx;
2984
2985        WARN_ON_ONCE(ctx->parent_ctx);
2986        /*
2987         * There are two ways this annotation is useful:
2988         *
2989         *  1) there is a lock recursion from perf_event_exit_task
2990         *     see the comment there.
2991         *
2992         *  2) there is a lock-inversion with mmap_sem through
2993         *     perf_event_read_group(), which takes faults while
2994         *     holding ctx->mutex, however this is called after
2995         *     the last filedesc died, so there is no possibility
2996         *     to trigger the AB-BA case.
2997         */
2998        mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
2999        raw_spin_lock_irq(&ctx->lock);
3000        perf_group_detach(event);
3001        raw_spin_unlock_irq(&ctx->lock);
3002        perf_remove_from_context(event);
3003        mutex_unlock(&ctx->mutex);
3004
3005        free_event(event);
3006
3007        return 0;
3008}
3009EXPORT_SYMBOL_GPL(perf_event_release_kernel);
3010
3011/*
3012 * Called when the last reference to the file is gone.
3013 */
3014static void put_event(struct perf_event *event)
3015{
3016        struct task_struct *owner;
3017
3018        if (!atomic_long_dec_and_test(&event->refcount))
3019                return;
3020
3021        rcu_read_lock();
3022        owner = ACCESS_ONCE(event->owner);
3023        /*
3024         * Matches the smp_wmb() in perf_event_exit_task(). If we observe
3025         * !owner it means the list deletion is complete and we can indeed
3026         * free this event, otherwise we need to serialize on
3027         * owner->perf_event_mutex.
3028         */
3029        smp_read_barrier_depends();
3030        if (owner) {
3031                /*
3032                 * Since delayed_put_task_struct() also drops the last
3033                 * task reference we can safely take a new reference
3034                 * while holding the rcu_read_lock().
3035                 */
3036                get_task_struct(owner);
3037        }
3038        rcu_read_unlock();
3039
3040        if (owner) {
3041                mutex_lock(&owner->perf_event_mutex);
3042                /*
3043                 * We have to re-check the event->owner field, if it is cleared
3044                 * we raced with perf_event_exit_task(), acquiring the mutex
3045                 * ensured they're done, and we can proceed with freeing the
3046                 * event.
3047                 */
3048                if (event->owner)
3049                        list_del_init(&event->owner_entry);
3050                mutex_unlock(&owner->perf_event_mutex);
3051                put_task_struct(owner);
3052        }
3053
3054        perf_event_release_kernel(event);
3055}
3056
3057static int perf_release(struct inode *inode, struct file *file)
3058{
3059        put_event(file->private_data);
3060        return 0;
3061}
3062
3063u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
3064{
3065        struct perf_event *child;
3066        u64 total = 0;
3067
3068        *enabled = 0;
3069        *running = 0;
3070
3071        mutex_lock(&event->child_mutex);
3072        total += perf_event_read(event);
3073        *enabled += event->total_time_enabled +
3074                        atomic64_read(&event->child_total_time_enabled);
3075        *running += event->total_time_running +
3076                        atomic64_read(&event->child_total_time_running);
3077
3078        list_for_each_entry(child, &event->child_list, child_list) {
3079                total += perf_event_read(child);
3080                *enabled += child->total_time_enabled;
3081                *running += child->total_time_running;
3082        }
3083        mutex_unlock(&event->child_mutex);
3084
3085        return total;
3086}
3087EXPORT_SYMBOL_GPL(perf_event_read_value);
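
/*
 * Illustrative sketch (simplified): an in-kernel user that created a
 * counter with perf_event_create_kernel_counter() could read the
 * aggregated value together with the scaling times like this:
 *
 *	u64 enabled, running;
 *	u64 count = perf_event_read_value(event, &enabled, &running);
 *
 * count sums the event and all of its inherited children; enabled and
 * running let the caller scale the count when the event was only
 * scheduled for part of the time (e.g. due to counter multiplexing).
 */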
3088
3089static int perf_event_read_group(struct perf_event *event,
3090                                   u64 read_format, char __user *buf)
3091{
3092        struct perf_event *leader = event->group_leader, *sub;
3093        int n = 0, size = 0, ret = -EFAULT;
3094        struct perf_event_context *ctx = leader->ctx;
3095        u64 values[5];
3096        u64 count, enabled, running;
3097
3098        mutex_lock(&ctx->mutex);
3099        count = perf_event_read_value(leader, &enabled, &running);
3100
3101        values[n++] = 1 + leader->nr_siblings;
3102        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3103                values[n++] = enabled;
3104        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3105                values[n++] = running;
3106        values[n++] = count;
3107        if (read_format & PERF_FORMAT_ID)
3108                values[n++] = primary_event_id(leader);
3109
3110        size = n * sizeof(u64);
3111
3112        if (copy_to_user(buf, values, size))
3113                goto unlock;
3114
3115        ret = size;
3116
3117        list_for_each_entry(sub, &leader->sibling_list, group_entry) {
3118                n = 0;
3119
3120                values[n++] = perf_event_read_value(sub, &enabled, &running);
3121                if (read_format & PERF_FORMAT_ID)
3122                        values[n++] = primary_event_id(sub);
3123
3124                size = n * sizeof(u64);
3125
3126                if (copy_to_user(buf + ret, values, size)) {
3127                        ret = -EFAULT;
3128                        goto unlock;
3129                }
3130
3131                ret += size;
3132        }
3133unlock:
3134        mutex_unlock(&ctx->mutex);
3135
3136        return ret;
3137}
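
/*
 * For reference, the buffer filled in above follows the PERF_FORMAT_GROUP
 * read layout (optional fields present only when the corresponding
 * read_format bit is set):
 *
 *	{ u64 nr;
 *	  { u64 time_enabled; }
 *	  { u64 time_running; }
 *	  { u64 value; { u64 id; } }		leader
 *	  { u64 value; { u64 id; } }		each of the nr - 1 siblings
 *	}
 */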
3138
3139static int perf_event_read_one(struct perf_event *event,
3140                                 u64 read_format, char __user *buf)
3141{
3142        u64 enabled, running;
3143        u64 values[4];
3144        int n = 0;
3145
3146        values[n++] = perf_event_read_value(event, &enabled, &running);
3147        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
3148                values[n++] = enabled;
3149        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
3150                values[n++] = running;
3151        if (read_format & PERF_FORMAT_ID)
3152                values[n++] = primary_event_id(event);
3153
3154        if (copy_to_user(buf, values, n * sizeof(u64)))
3155                return -EFAULT;
3156
3157        return n * sizeof(u64);
3158}
3159
3160/*
3161 * Read the performance event - simple non-blocking version for now
3162 */
3163static ssize_t
3164perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
3165{
3166        u64 read_format = event->attr.read_format;
3167        int ret;
3168
3169        /*
3170         * Return end-of-file for a read on an event that is in
3171         * error state (i.e. because it was pinned but it couldn't be
3172         * scheduled on to the CPU at some point).
3173         */
3174        if (event->state == PERF_EVENT_STATE_ERROR)
3175                return 0;
3176
3177        if (count < event->read_size)
3178                return -ENOSPC;
3179
3180        WARN_ON_ONCE(event->ctx->parent_ctx);
3181        if (read_format & PERF_FORMAT_GROUP)
3182                ret = perf_event_read_group(event, read_format, buf);
3183        else
3184                ret = perf_event_read_one(event, read_format, buf);
3185
3186        return ret;
3187}
3188
3189static ssize_t
3190perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
3191{
3192        struct perf_event *event = file->private_data;
3193
3194        return perf_read_hw(event, buf, count);
3195}
3196
3197static unsigned int perf_poll(struct file *file, poll_table *wait)
3198{
3199        struct perf_event *event = file->private_data;
3200        struct ring_buffer *rb;
3201        unsigned int events = POLLHUP;
3202
3203        /*
3204         * Pin the event->rb by taking event->mmap_mutex; otherwise
3205         * perf_event_set_output() can swizzle our rb and make us miss wakeups.
3206         */
3207        mutex_lock(&event->mmap_mutex);
3208        rb = event->rb;
3209        if (rb)
3210                events = atomic_xchg(&rb->poll, 0);
3211        mutex_unlock(&event->mmap_mutex);
3212
3213        poll_wait(file, &event->waitq, wait);
3214
3215        return events;
3216}
3217
3218static void perf_event_reset(struct perf_event *event)
3219{
3220        (void)perf_event_read(event);
3221        local64_set(&event->count, 0);
3222        perf_event_update_userpage(event);
3223}
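
/*
 * Illustrative sketch (simplified): via PERF_EVENT_IOC_RESET a counting
 * event can be zeroed between measurement phases (perf_fd is a
 * placeholder for a perf_event_open() fd with the default read_format;
 * run_workload() stands in for the code being measured):
 *
 *	u64 count;
 *
 *	ioctl(perf_fd, PERF_EVENT_IOC_RESET, 0);
 *	run_workload();
 *	read(perf_fd, &count, sizeof(count));
 */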
3224
3225/*
3226 * Holding the top-level event's child_mutex means that any
3227 * descendant process that has inherited this event will block
3228 * in sync_child_event if it goes to exit, thus satisfying the
3229 * task existence requirements of perf_event_enable/disable.
3230 */
3231static void perf_event_for_each_child(struct perf_event *event,
3232                                        void (*func)(struct perf_event *))
3233{
3234        struct perf_event *child;
3235
3236        WARN_ON_ONCE(event->ctx->parent_ctx);
3237        mutex_lock(&event->child_mutex);
3238        func(event);
3239        list_for_each_entry(child, &event->child_list, child_list)
3240                func(child);
3241        mutex_unlock(&event->child_mutex);
3242}
3243
3244static void perf_event_for_each(struct perf_event *event,
3245                                  void (*func)(struct perf_event *))
3246{
3247        struct perf_event_context *ctx = event->ctx;
3248        struct perf_event *sibling;
3249
3250        WARN_ON_ONCE(ctx->parent_ctx);
3251        mutex_lock(&ctx->mutex);
3252        event = event->group_leader;
3253
3254        perf_event_for_each_child(event, func);
3255        list_for_each_entry(sibling, &event->sibling_list, group_entry)
3256                perf_event_for_each_child(sibling, func);
3257        mutex_unlock(&ctx->mutex);
3258}
3259
3260static int perf_event_period(struct perf_event *event, u64 __user *arg)
3261{
3262        struct perf_event_context *ctx = event->ctx;
3263        int ret = 0;
3264        u64 value;
3265
3266        if (!is_sampling_event(event))
3267                return -EINVAL;
3268
3269        if (copy_from_user(&value, arg, sizeof(value)))
3270                return -EFAULT;
3271
3272        if (!value)
3273                return -EINVAL;
3274
3275        raw_spin_lock_irq(&ctx->lock);
3276        if (event->attr.freq) {
3277                if (value > sysctl_perf_event_sample_rate) {
3278                        ret = -EINVAL;
3279                        goto unlock;
3280                }
3281
3282                event->attr.sample_freq = value;
3283        } else {
3284                event->attr.sample_period = value;
3285                event->hw.sample_period = value;
3286        }
3287unlock:
3288        raw_spin_unlock_irq(&ctx->lock);
3289
3290        return ret;
3291}
3292
3293static const struct file_operations perf_fops;
3294
3295static inline int perf_fget_light(int fd, struct fd *p)
3296{
3297        struct fd f = fdget(fd);
3298        if (!f.file)
3299                return -EBADF;
3300
3301        if (f.file->f_op != &perf_fops) {
3302                fdput(f);
3303                return -EBADF;
3304        }
3305        *p = f;
3306        return 0;
3307}
3308
3309static int perf_event_set_output(struct perf_event *event,
3310                                 struct perf_event *output_event);
3311static int perf_event_set_filter(struct perf_event *event, void __user *arg);
3312
3313static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
3314{
3315        struct perf_event *event = file->private_data;
3316        void (*func)(struct perf_event *);
3317        u32 flags = arg;
3318
3319        switch (cmd) {
3320        case PERF_EVENT_IOC_ENABLE:
3321                func = perf_event_enable;
3322                break;
3323        case PERF_EVENT_IOC_DISABLE:
3324                func = perf_event_disable;
3325                break;
3326        case PERF_EVENT_IOC_RESET:
3327                func = perf_event_reset;
3328                break;
3329
3330        case PERF_EVENT_IOC_REFRESH:
3331                return perf_event_refresh(event, arg);
3332
3333        case PERF_EVENT_IOC_PERIOD:
3334                return perf_event_period(event, (u64 __user *)arg);
3335
3336        case PERF_EVENT_IOC_SET_OUTPUT:
3337        {
3338                int ret;
3339                if (arg != -1) {
3340                        struct perf_event *output_event;
3341                        struct fd output;
3342                        ret = perf_fget_light(arg, &output);
3343                        if (ret)
3344                                return ret;
3345                        output_event = output.file->private_data;
3346                        ret = perf_event_set_output(event, output_event);
3347                        fdput(output);
3348                } else {
3349                        ret = perf_event_set_output(event, NULL);
3350                }
3351                return ret;
3352        }
3353
3354        case PERF_EVENT_IOC_SET_FILTER:
3355                return perf_event_set_filter(event, (void __user *)arg);
3356
3357        default:
3358                return -ENOTTY;
3359        }
3360
3361        if (flags & PERF_IOC_FLAG_GROUP)
3362                perf_event_for_each(event, func);
3363        else
3364                perf_event_for_each_child(event, func);
3365
3366        return 0;
3367}
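
/*
 * Illustrative user-space sketch (an assumption, not part of this file):
 *
 *	ioctl(perf_fd, PERF_EVENT_IOC_RESET,   PERF_IOC_FLAG_GROUP);
 *	ioctl(perf_fd, PERF_EVENT_IOC_ENABLE,  PERF_IOC_FLAG_GROUP);
 *	... run the workload ...
 *	ioctl(perf_fd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
 *
 * With PERF_IOC_FLAG_GROUP set, the operation is applied to the whole
 * group via perf_event_for_each() above; without it, only to the event
 * and its inherited children via perf_event_for_each_child().
 */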
3368
3369int perf_event_task_enable(void)
3370{
3371        struct perf_event *event;
3372
3373        mutex_lock(&current->perf_event_mutex);
3374        list_for_each_entry(event, &current->perf_event_list, owner_entry)
3375                perf_event_for_each_child(event, perf_event_enable);
3376        mutex_unlock(&current->perf_event_mutex);
3377
3378        return 0;
3379}
3380
3381int perf_event_task_disable(void)
3382{
3383        struct perf_event *event;
3384
3385        mutex_lock(&current->perf_event_mutex);
3386        list_for_each_entry(event, &current->perf_event_list, owner_entry)
3387                perf_event_for_each_child(event, perf_event_disable);
3388        mutex_unlock(&current->perf_event_mutex);
3389
3390        return 0;
3391}
3392
3393static int perf_event_index(struct perf_event *event)
3394{
3395        if (event->hw.state & PERF_HES_STOPPED)
3396                return 0;
3397
3398        if (event->state != PERF_EVENT_STATE_ACTIVE)
3399                return 0;
3400
3401        return event->pmu->event_idx(event);
3402}
3403
3404static void calc_timer_values(struct perf_event *event,
3405                                u64 *now,
3406                                u64 *enabled,
3407                                u64 *running)
3408{
3409        u64 ctx_time;
3410
3411        *now = perf_clock();
3412        ctx_time = event->shadow_ctx_time + *now;
3413        *enabled = ctx_time - event->tstamp_enabled;
3414        *running = ctx_time - event->tstamp_running;
3415}
3416
3417void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
3418{
3419}
3420
3421/*
3422 * Callers need to ensure there can be no nesting of this function, otherwise
3423 * the seqlock logic goes bad. We cannot serialize this because the arch
3424 * code calls this from NMI context.
3425 */
3426void perf_event_update_userpage(struct perf_event *event)
3427{
3428        struct perf_event_mmap_page *userpg;
3429        struct ring_buffer *rb;
3430        u64 enabled, running, now;
3431
3432        rcu_read_lock();
3433        /*
3434         * compute total_time_enabled, total_time_running
3435         * based on snapshot values taken when the event
3436         * was last scheduled in.
3437         *
3438         * we cannot simply call update_context_time()
3439         * because of locking issues, as we can be called in
3440         * NMI context
3441         */
3442        calc_timer_values(event, &now, &enabled, &running);
3443        rb = rcu_dereference(event->rb);
3444        if (!rb)
3445                goto unlock;
3446
3447        userpg = rb->user_page;
3448
3449        /*
3450         * Disable preemption so as not to let the corresponding user-space
3451         * spin too long if we get preempted.
3452         */
3453        preempt_disable();
3454        ++userpg->lock;
3455        barrier();
3456        userpg->index = perf_event_index(event);
3457        userpg->offset = perf_event_count(event);
3458        if (userpg->index)
3459                userpg->offset -= local64_read(&event->hw.prev_count);
3460
3461        userpg->time_enabled = enabled +
3462                        atomic64_read(&event->child_total_time_enabled);
3463
3464        userpg->time_running = running +
3465                        atomic64_read(&event->child_total_time_running);
3466
3467        arch_perf_update_userpage(userpg, now);
3468
3469        barrier();
3470        ++userpg->lock;
3471        preempt_enable();
3472unlock:
3473        rcu_read_unlock();
3474}
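
/*
 * Illustrative user-space sketch (an assumption, not part of this file):
 * the ->lock field above behaves like a seqcount, so a self-monitoring
 * task reading the mmap()ed page (pc) would retry roughly as follows:
 *
 *	u32 seq;
 *	u64 count, enabled, running;
 *
 *	do {
 *		seq = pc->lock;
 *		barrier();
 *		count   = pc->offset;
 *		enabled = pc->time_enabled;
 *		running = pc->time_running;
 *		barrier();
 *	} while (pc->lock != seq);
 */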
3475
3476static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
3477{
3478        struct perf_event *event = vma->vm_file->private_data;
3479        struct ring_buffer *rb;
3480        int ret = VM_FAULT_SIGBUS;
3481
3482        if (vmf->flags & FAULT_FLAG_MKWRITE) {
3483                if (vmf->pgoff == 0)
3484                        ret = 0;
3485                return ret;
3486        }
3487
3488        rcu_read_lock();
3489        rb = rcu_dereference(event->rb);
3490        if (!rb)
3491                goto unlock;
3492
3493        if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
3494                goto unlock;
3495
3496        vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
3497        if (!vmf->page)
3498                goto unlock;
3499
3500        get_page(vmf->page);
3501        vmf->page->mapping = vma->vm_file->f_mapping;
3502        vmf->page->index   = vmf->pgoff;
3503
3504        ret = 0;
3505unlock:
3506        rcu_read_unlock();
3507
3508        return ret;
3509}
3510
3511static void ring_buffer_attach(struct perf_event *event,
3512                               struct ring_buffer *rb)
3513{
3514        unsigned long flags;
3515
3516        if (!list_empty(&event->rb_entry))
3517                return;
3518
3519        spin_lock_irqsave(&rb->event_lock, flags);
3520        if (list_empty(&event->rb_entry))
3521                list_add(&event->rb_entry, &rb->event_list);
3522        spin_unlock_irqrestore(&rb->event_lock, flags);
3523}
3524
3525static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb)
3526{
3527        unsigned long flags;
3528
3529        if (list_empty(&event->rb_entry))
3530                return;
3531
3532        spin_lock_irqsave(&rb->event_lock, flags);
3533        list_del_init(&event->rb_entry);
3534        wake_up_all(&event->waitq);
3535        spin_unlock_irqrestore(&rb->event_lock, flags);
3536}
3537
3538static void ring_buffer_wakeup(struct perf_event *event)
3539{
3540        struct ring_buffer *rb;
3541
3542        rcu_read_lock();
3543        rb = rcu_dereference(event->rb);
3544        if (rb) {
3545                list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
3546                        wake_up_all(&event->waitq);
3547        }
3548        rcu_read_unlock();
3549}
3550
3551static void rb_free_rcu(struct rcu_head *rcu_head)
3552{
3553        struct ring_buffer *rb;
3554
3555        rb = container_of(rcu_head, struct ring_buffer, rcu_head);
3556        rb_free(rb);
3557}
3558
3559static struct ring_buffer *ring_buffer_get(struct perf_event *event)
3560{
3561        struct ring_buffer *rb;
3562
3563        rcu_read_lock();
3564        rb = rcu_dereference(event->rb);
3565        if (rb) {
3566                if (!atomic_inc_not_zero(&rb->refcount))
3567                        rb = NULL;
3568        }
3569        rcu_read_unlock();
3570
3571        return rb;
3572}
3573
3574static void ring_buffer_put(struct ring_buffer *rb)
3575{
3576        if (!atomic_dec_and_test(&rb->refcount))
3577                return;
3578
3579        WARN_ON_ONCE(!list_empty(&rb->event_list));
3580
3581        call_rcu(&rb->rcu_head, rb_free_rcu);
3582}
3583
3584static void perf_mmap_open(struct vm_area_struct *vma)
3585{
3586        struct perf_event *event = vma->vm_file->private_data;
3587
3588        atomic_inc(&event->mmap_count);
3589        atomic_inc(&event->rb->mmap_count);
3590}
3591
3592/*
3593 * A buffer can be mmap()ed multiple times; either directly through the same
3594 * event, or through other events by use of perf_event_set_output().
3595 *
3596 * In order to undo the VM accounting done by perf_mmap() we need to destroy
3597 * the buffer here, where we still have a VM context. This means we need
3598 * to detach all events redirecting to us.
3599 */
3600static void perf_mmap_close(struct vm_area_struct *vma)
3601{
3602        struct perf_event *event = vma->vm_file->private_data;
3603
3604        struct ring_buffer *rb = event->rb;
3605        struct user_struct *mmap_user = rb->mmap_user;
3606        int mmap_locked = rb->mmap_locked;
3607        unsigned long size = perf_data_size(rb);
3608
3609        atomic_dec(&rb->mmap_count);
3610
3611        if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
3612                return;
3613
3614        /* Detach current event from the buffer. */
3615        rcu_assign_pointer(event->rb, NULL);
3616        ring_buffer_detach(event, rb);
3617        mutex_unlock(&event->mmap_mutex);
3618
3619        /* If there's still other mmap()s of this buffer, we're done. */
3620        if (atomic_read(&rb->mmap_count)) {
3621                ring_buffer_put(rb); /* can't be last */
3622                return;
3623        }
3624
3625        /*
3626         * No other mmap()s, detach from all other events that might redirect
3627         * into the now unreachable buffer. Somewhat complicated by the
3628         * fact that rb::event_lock otherwise nests inside mmap_mutex.
3629         */
3630again:
3631        rcu_read_lock();
3632        list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
3633                if (!atomic_long_inc_not_zero(&event->refcount)) {
3634                        /*
3635                         * This event is en-route to free_event() which will
3636                         * detach it and remove it from the list.
3637                         */
3638                        continue;
3639                }
3640                rcu_read_unlock();
3641
3642                mutex_lock(&event->mmap_mutex);
3643                /*
3644                 * Check we didn't race with perf_event_set_output() which can
3645                 * swizzle the rb from under us while we were waiting to
3646                 * acquire mmap_mutex.
3647                 *
3648                 * If we find a different rb, ignore this event; the next
3649                 * iteration will no longer find it on the list. We still have
3650                 * to restart the iteration to make sure we're not now
3651                 * iterating the wrong list.
3652                 */
3653                if (event->rb == rb) {
3654                        rcu_assign_pointer(event->rb, NULL);
3655                        ring_buffer_detach(event, rb);
3656                        ring_buffer_put(rb); /* can't be last, we still have one */
3657                }
3658                mutex_unlock(&event->mmap_mutex);
3659                put_event(event);
3660
3661                /*
3662                 * Restart the iteration; either we're on the wrong list or we
3663                 * destroyed its integrity by doing a deletion.
3664                 */
3665                goto again;
3666        }
3667        rcu_read_unlock();
3668
3669        /*
3670         * It could be that there are still a few 0-ref events on the list; they'll
3671         * get cleaned up by free_event() -- they'll also still have their
3672         * ref on the rb and will free it whenever they are done with it.
3673         *
3674         * Aside from that, this buffer is 'fully' detached and unmapped,
3675         * undo the VM accounting.
3676         */
3677
3678        atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
3679        vma->vm_mm->pinned_vm -= mmap_locked;
3680        free_uid(mmap_user);
3681
3682        ring_buffer_put(rb); /* could be last */
3683}
3684
3685static const struct vm_operations_struct perf_mmap_vmops = {
3686        .open           = perf_mmap_open,
3687        .close          = perf_mmap_close,
3688        .fault          = perf_mmap_fault,
3689        .page_mkwrite   = perf_mmap_fault,
3690};
3691
3692static int perf_mmap(struct file *file, struct vm_area_struct *vma)
3693{
3694        struct perf_event *event = file->private_data;
3695        unsigned long user_locked, user_lock_limit;
3696        struct user_struct *user = current_user();
3697        unsigned long locked, lock_limit;
3698        struct ring_buffer *rb;
3699        unsigned long vma_size;
3700        unsigned long nr_pages;
3701        long user_extra, extra;
3702        int ret = 0, flags = 0;
3703
3704        /*
3705         * Don't allow mmap() of inherited per-task counters. This would
3706         * create a performance issue due to all children writing to the
3707         * same rb.
3708         */
3709        if (event->cpu == -1 && event->attr.inherit)
3710                return -EINVAL;
3711
3712        if (!(vma->vm_flags & VM_SHARED))
3713                return -EINVAL;
3714
3715        vma_size = vma->vm_end - vma->vm_start;
3716        nr_pages = (vma_size / PAGE_SIZE) - 1;
3717
3718        /*
3719         * If we have rb pages, ensure they're a power-of-two number, so we
3720         * can do bitmasks instead of modulo.
3721         */
3722        if (nr_pages != 0 && !is_power_of_2(nr_pages))
3723                return -EINVAL;
3724
3725        if (vma_size != PAGE_SIZE * (1 + nr_pages))
3726                return -EINVAL;
3727
3728        if (vma->vm_pgoff != 0)
3729                return -EINVAL;
3730
3731        WARN_ON_ONCE(event->ctx->parent_ctx);
3732again:
3733        mutex_lock(&event->mmap_mutex);
3734        if (event->rb) {
3735                if (event->rb->nr_pages != nr_pages) {
3736                        ret = -EINVAL;
3737                        goto unlock;
3738                }
3739
3740                if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
3741                        /*
3742                         * Raced against perf_mmap_close() through
3743                         * perf_event_set_output(). Try again, hope for better
3744                         * luck.
3745                         */
3746                        mutex_unlock(&event->mmap_mutex);
3747                        goto again;
3748                }
3749
3750                goto unlock;
3751        }
3752
3753        user_extra = nr_pages + 1;
3754        user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
3755
3756        /*
3757         * Increase the limit linearly with more CPUs:
3758         */
3759        user_lock_limit *= num_online_cpus();
3760
3761        user_locked = atomic_long_read(&user->locked_vm) + user_extra;
3762
3763        extra = 0;
3764        if (user_locked > user_lock_limit)
3765                extra = user_locked - user_lock_limit;
3766
3767        lock_limit = rlimit(RLIMIT_MEMLOCK);
3768        lock_limit >>= PAGE_SHIFT;
3769        locked = vma->vm_mm->pinned_vm + extra;
3770
3771        if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
3772                !capable(CAP_IPC_LOCK)) {
3773                ret = -EPERM;
3774                goto unlock;
3775        }
3776
3777        WARN_ON(event->rb);
3778
3779        if (vma->vm_flags & VM_WRITE)
3780                flags |= RING_BUFFER_WRITABLE;
3781
3782        rb = rb_alloc(nr_pages,
3783                event->attr.watermark ? event->attr.wakeup_watermark : 0,
3784                event->cpu, flags);
3785
3786        if (!rb) {
3787                ret = -ENOMEM;
3788                goto unlock;
3789        }
3790
3791        atomic_set(&rb->mmap_count, 1);
3792        rb->mmap_locked = extra;
3793        rb->mmap_user = get_current_user();
3794
3795        atomic_long_add(user_extra, &user->locked_vm);
3796        vma->vm_mm->pinned_vm += extra;
3797
3798        ring_buffer_attach(event, rb);
3799        rcu_assign_pointer(event->rb, rb);
3800
3801        perf_event_update_userpage(event);
3802
3803unlock:
3804        if (!ret)
3805                atomic_inc(&event->mmap_count);
3806        mutex_unlock(&event->mmap_mutex);
3807
3808        /*
3809         * Since pinned accounting is per vm we cannot allow fork() to copy our
3810         * vma.
3811         */
3812        vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
3813        vma->vm_ops = &perf_mmap_vmops;
3814
3815        return ret;
3816}
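
/*
 * Illustrative user-space sketch (an assumption, not part of this file):
 * the checks above require a MAP_SHARED mapping at offset 0 covering the
 * metadata page plus a power-of-two number of data pages, e.g.:
 *
 *	size_t page_size = sysconf(_SC_PAGESIZE);
 *	size_t len = (1 + (1UL << n)) * page_size;
 *	void *base = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, perf_fd, 0);
 *
 * where n (the data-page exponent) and perf_fd are supplied by the
 * caller; perf_fd comes from perf_event_open().
 */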
3817
3818static int perf_fasync(int fd, struct file *filp, int on)
3819{
3820        struct inode *inode = file_inode(filp);
3821        struct perf_event *event = filp->private_data;
3822        int retval;
3823
3824        mutex_lock(&inode->i_mutex);
3825        retval = fasync_helper(fd, filp, on, &event->fasync);
3826        mutex_unlock(&inode->i_mutex);
3827
3828        if (retval < 0)
3829                return retval;
3830
3831        return 0;
3832}
3833
3834static const struct file_operations perf_fops = {
3835        .llseek                 = no_llseek,
3836        .release                = perf_release,
3837        .read                   = perf_read,
3838        .poll                   = perf_poll,
3839        .unlocked_ioctl         = perf_ioctl,
3840        .compat_ioctl           = perf_ioctl,
3841        .mmap                   = perf_mmap,
3842        .fasync                 = perf_fasync,
3843};
3844
3845/*
3846 * Perf event wakeup
3847 *
3848 * If there's data, ensure we set the poll() state and publish everything
3849 * to user-space before waking everybody up.
3850 */
3851
3852void perf_event_wakeup(struct perf_event *event)
3853{
3854        ring_buffer_wakeup(event);
3855
3856        if (event->pending_kill) {
3857                kill_fasync(&event->fasync, SIGIO, event->pending_kill);
3858                event->pending_kill = 0;
3859        }
3860}
3861
3862static void perf_pending_event(struct irq_work *entry)
3863{
3864        struct perf_event *event = container_of(entry,
3865                        struct perf_event, pending);
3866
3867        if (event->pending_disable) {
3868                event->pending_disable = 0;
3869                __perf_event_disable(event);
3870        }
3871
3872        if (event->pending_wakeup) {
3873                event->pending_wakeup = 0;
3874                perf_event_wakeup(event);
3875        }
3876}
3877
3878/*
3879 * We assume there is only KVM supporting the callbacks.
3880 * Later on, we might change it to a list if there is
3881 * another virtualization implementation supporting the callbacks.
3882 */
3883struct perf_guest_info_callbacks *perf_guest_cbs;
3884
3885int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
3886{
3887        perf_guest_cbs = cbs;
3888        return 0;
3889}
3890EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
3891
3892int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
3893{
3894        perf_guest_cbs = NULL;
3895        return 0;
3896}
3897EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
3898
3899static void
3900perf_output_sample_regs(struct perf_output_handle *handle,
3901                        struct pt_regs *regs, u64 mask)
3902{
3903        int bit;
3904
3905        for_each_set_bit(bit, (const unsigned long *) &mask,
3906                         sizeof(mask) * BITS_PER_BYTE) {
3907                u64 val;
3908
3909                val = perf_reg_value(regs, bit);
3910                perf_output_put(handle, val);
3911        }
3912}
3913
3914static void perf_sample_regs_user(struct perf_regs_user *regs_user,
3915                                  struct pt_regs *regs)
3916{
3917        if (!user_mode(regs)) {
3918                if (current->mm)
3919                        regs = task_pt_regs(current);
3920                else
3921                        regs = NULL;
3922        }
3923
3924        if (regs) {
3925                regs_user->regs = regs;
3926                regs_user->abi  = perf_reg_abi(current);
3927        }
3928}
3929
3930/*
3931 * Get remaining task size from user stack pointer.
3932 *
3933 * It'd be better to take the stack vma map and limit this more
3934 * precisely, but there's no way to get it safely under interrupt,
3935 * so use TASK_SIZE as the limit.
3936 */
3937static u64 perf_ustack_task_size(struct pt_regs *regs)
3938{
3939        unsigned long addr = perf_user_stack_pointer(regs);
3940
3941        if (!addr || addr >= TASK_SIZE)
3942                return 0;
3943
3944        return TASK_SIZE - addr;
3945}
3946
3947static u16
3948perf_sample_ustack_size(u16 stack_size, u16 header_size,
3949                        struct pt_regs *regs)
3950{
3951        u64 task_size;
3952
3953        /* No regs, no stack pointer, no dump. */
3954        if (!regs)
3955                return 0;
3956
3957        /*
3958         * Check whether the requested stack size fits within:
3959         * - TASK_SIZE
3960         *   If it doesn't, limit the size to TASK_SIZE.
3961         *
3962         * - the remaining sample size
3963         *   If it doesn't, shrink the stack size to fit
3964         *   into the remaining sample size.
3965         */
3966
3967        task_size  = min((u64) USHRT_MAX, perf_ustack_task_size(regs));
3968        stack_size = min(stack_size, (u16) task_size);
3969
3970        /* Current header size plus static size and dynamic size. */
3971        header_size += 2 * sizeof(u64);
3972
3973        /* Do we fit in with the current stack dump size? */
3974        if ((u16) (header_size + stack_size) < header_size) {
3975                /*
3976                 * If we overflow the maximum size for the sample,
3977                 * we customize the stack dump size to fit in.
3978                 */
3979                stack_size = USHRT_MAX - header_size - sizeof(u64);
3980                stack_size = round_up(stack_size, sizeof(u64));
3981        }
3982
3983        return stack_size;
3984}
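
/*
 * Worked example (illustrative): with header_size == 128 after the
 * 2 * sizeof(u64) adjustment above, a requested stack_size of 65472
 * would overflow the u16 sample size (128 + 65472 wraps around), so it
 * is trimmed to USHRT_MAX - 128 - sizeof(u64) = 65399 and rounded up
 * to 65400, keeping header_size + stack_size below USHRT_MAX.
 */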
3985
3986static void
3987perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
3988                          struct pt_regs *regs)
3989{
3990        /* Case of a kernel thread, nothing to dump */
3991        if (!regs) {
3992                u64 size = 0;
3993                perf_output_put(handle, size);
3994        } else {
3995                unsigned long sp;
3996                unsigned int rem;
3997                u64 dyn_size;
3998
3999                /*
4000                 * We dump:
4001                 * static size
4002                 *   - the size requested by the user, or the best one we can
4003                 *     fit into the maximum sample size
4004                 * data
4005                 *   - user stack dump data
4006                 * dynamic size
4007                 *   - the actual dumped size
4008                 */
4009
4010                /* Static size. */
4011                perf_output_put(handle, dump_size);
4012
4013                /* Data. */
4014                sp = perf_user_stack_pointer(regs);
4015                rem = __output_copy_user(handle, (void *) sp, dump_size);
4016                dyn_size = dump_size - rem;
4017
4018                perf_output_skip(handle, rem);
4019
4020                /* Dynamic size. */
4021                perf_output_put(handle, dyn_size);
4022        }
4023}
4024
4025static void __perf_event_header__init_id(struct perf_event_header *header,
4026                                         struct perf_sample_data *data,
4027                                         struct perf_event *event)
4028{
4029        u64 sample_type = event->attr.sample_type;
4030
4031        data->type = sample_type;
4032        header->size += event->id_header_size;
4033
4034        if (sample_type & PERF_SAMPLE_TID) {
4035                /* namespace issues */
4036                data->tid_entry.pid = perf_event_pid(event, current);
4037                data->tid_entry.tid = perf_event_tid(event, current);
4038        }
4039
4040        if (sample_type & PERF_SAMPLE_TIME)
4041                data->time = perf_clock();
4042
4043        if (sample_type & PERF_SAMPLE_ID)
4044                data->id = primary_event_id(event);
4045
4046        if (sample_type & PERF_SAMPLE_STREAM_ID)
4047                data->stream_id = event->id;
4048
4049        if (sample_type & PERF_SAMPLE_CPU) {
4050                data->cpu_entry.cpu      = raw_smp_processor_id();
4051                data->cpu_entry.reserved = 0;
4052        }
4053}
4054
4055void perf_event_header__init_id(struct perf_event_header *header,
4056                                struct perf_sample_data *data,
4057                                struct perf_event *event)
4058{
4059        if (event->attr.sample_id_all)
4060                __perf_event_header__init_id(header, data, event);
4061}
4062
4063static void __perf_event__output_id_sample(struct perf_output_handle *handle,
4064                                           struct perf_sample_data *data)
4065{
4066        u64 sample_type = data->type;
4067
4068        if (sample_type & PERF_SAMPLE_TID)
4069                perf_output_put(handle, data->tid_entry);
4070
4071        if (sample_type & PERF_SAMPLE_TIME)
4072                perf_output_put(handle, data->time);
4073
4074        if (sample_type & PERF_SAMPLE_ID)
4075                perf_output_put(handle, data->id);
4076
4077        if (sample_type & PERF_SAMPLE_STREAM_ID)
4078                perf_output_put(handle, data->stream_id);
4079
4080        if (sample_type & PERF_SAMPLE_CPU)
4081                perf_output_put(handle, data->cpu_entry);
4082}
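
/*
 * Illustrative layout sketch: with attr.sample_id_all set, the trailer
 * written above is appended to non-sample records roughly as:
 *
 *	{ u32 pid, tid;  } && PERF_SAMPLE_TID
 *	{ u64 time;      } && PERF_SAMPLE_TIME
 *	{ u64 id;        } && PERF_SAMPLE_ID
 *	{ u64 stream_id; } && PERF_SAMPLE_STREAM_ID
 *	{ u32 cpu, res;  } && PERF_SAMPLE_CPU
 */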
4083
4084void perf_event__output_id_sample(struct perf_event *event,
4085                                  struct perf_output_handle *handle,
4086                                  struct perf_sample_data *sample)
4087{
4088        if (event->attr.sample_id_all)
4089                __perf_event__output_id_sample(handle, sample);
4090}
4091
4092static void perf_output_read_one(struct perf_output_handle *handle,
4093                                 struct perf_event *event,
4094                                 u64 enabled, u64 running)
4095{
4096        u64 read_format = event->attr.read_format;
4097        u64 values[4];
4098        int n = 0;
4099
4100        values[n++] = perf_event_count(event);
4101        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
4102                values[n++] = enabled +
4103                        atomic64_read(&event->child_total_time_enabled);
4104        }
4105        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
4106                values[n++] = running +
4107                        atomic64_read(&event->child_total_time_running);
4108        }
4109        if (read_format & PERF_FORMAT_ID)
4110                values[n++] = primary_event_id(event);
4111
4112        __output_copy(handle, values, n * sizeof(u64));
4113}
4114
4115/*
4116 * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
4117 */
4118static void perf_output_read_group(struct perf_output_handle *handle,
4119                            struct perf_event *event,
4120                            u64 enabled, u64 running)
4121{
4122        struct perf_event *leader = event->group_leader, *sub;
4123        u64 read_format = event->attr.read_format;
4124        u64 values[5];
4125        int n = 0;
4126
4127        values[n++] = 1 + leader->nr_siblings;
4128
4129        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
4130                values[n++] = enabled;
4131
4132        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
4133                values[n++] = running;
4134
4135        if (leader != event)
4136                leader->pmu->read(leader);
4137
4138        values[n++] = perf_event_count(leader);
4139        if (read_format & PERF_FORMAT_ID)
4140                values[n++] = primary_event_id(leader);
4141
4142        __output_copy(handle, values, n * sizeof(u64));
4143
4144        list_for_each_entry(sub, &leader->sibling_list, group_entry) {
4145                n = 0;
4146
4147                if (sub != event)
4148                        sub->pmu->read(sub);
4149
4150                values[n++] = perf_event_count(sub);
4151                if (read_format & PERF_FORMAT_ID)
4152                        values[n++] = primary_event_id(sub);
4153
4154                __output_copy(handle, values, n * sizeof(u64));
4155        }
4156}
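
/*
 * Illustrative layout sketch: the PERF_FORMAT_GROUP read data written
 * above looks roughly like:
 *
 *	{ u64 nr;
 *	  { u64 time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64 time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64 value;
 *	    { u64 id; } && PERF_FORMAT_ID
 *	  } cntr[nr];
 *	}
 */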
4157
4158#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
4159                                 PERF_FORMAT_TOTAL_TIME_RUNNING)
4160
4161static void perf_output_read(struct perf_output_handle *handle,
4162                             struct perf_event *event)
4163{
4164        u64 enabled = 0, running = 0, now;
4165        u64 read_format = event->attr.read_format;
4166
4167        /*
4168         * compute total_time_enabled, total_time_running
4169         * based on snapshot values taken when the event
4170         * was last scheduled in.
4171         *
4172         * we cannot simply call update_context_time()
4173         * because of locking issues, as we are called in
4174         * NMI context
4175         */
4176        if (read_format & PERF_FORMAT_TOTAL_TIMES)
4177                calc_timer_values(event, &now, &enabled, &running);
4178
4179        if (event->attr.read_format & PERF_FORMAT_GROUP)
4180                perf_output_read_group(handle, event, enabled, running);
4181        else
4182                perf_output_read_one(handle, event, enabled, running);
4183}
4184
4185void perf_output_sample(struct perf_output_handle *handle,
4186                        struct perf_event_header *header,
4187                        struct perf_sample_data *data,
4188                        struct perf_event *event)
4189{
4190        u64 sample_type = data->type;
4191
4192        perf_output_put(handle, *header);
4193
4194        if (sample_type & PERF_SAMPLE_IP)
4195                perf_output_put(handle, data->ip);
4196
4197        if (sample_type & PERF_SAMPLE_TID)
4198                perf_output_put(handle, data->tid_entry);
4199
4200        if (sample_type & PERF_SAMPLE_TIME)
4201                perf_output_put(handle, data->time);
4202
4203        if (sample_type & PERF_SAMPLE_ADDR)
4204                perf_output_put(handle, data->addr);
4205
4206        if (sample_type & PERF_SAMPLE_ID)
4207                perf_output_put(handle, data->id);
4208
4209        if (sample_type & PERF_SAMPLE_STREAM_ID)
4210                perf_output_put(handle, data->stream_id);
4211
4212        if (sample_type & PERF_SAMPLE_CPU)
4213                perf_output_put(handle, data->cpu_entry);
4214
4215        if (sample_type & PERF_SAMPLE_PERIOD)
4216                perf_output_put(handle, data->period);
4217
4218        if (sample_type & PERF_SAMPLE_READ)
4219                perf_output_read(handle, event);
4220
4221        if (sample_type & PERF_SAMPLE_CALLCHAIN) {
4222                if (data->callchain) {
4223                        int size = 1;
4224
4225                        if (data->callchain)
4226                                size += data->callchain->nr;
4227
4228                        size *= sizeof(u64);
4229
4230                        __output_copy(handle, data->callchain, size);
4231                } else {
4232                        u64 nr = 0;
4233                        perf_output_put(handle, nr);
4234                }
4235        }
4236
4237        if (sample_type & PERF_SAMPLE_RAW) {
4238                if (data->raw) {
4239                        perf_output_put(handle, data->raw->size);
4240                        __output_copy(handle, data->raw->data,
4241                                           data->raw->size);
4242                } else {
4243                        struct {
4244                                u32     size;
4245                                u32     data;
4246                        } raw = {
4247                                .size = sizeof(u32),
4248                                .data = 0,
4249                        };
4250                        perf_output_put(handle, raw);
4251                }
4252        }
4253
4254        if (!event->attr.watermark) {
4255                int wakeup_events = event->attr.wakeup_events;
4256
4257                if (wakeup_events) {
4258                        struct ring_buffer *rb = handle->rb;
4259                        int events = local_inc_return(&rb->events);
4260
4261                        if (events >= wakeup_events) {
4262                                local_sub(wakeup_events, &rb->events);
4263                                local_inc(&rb->wakeup);
4264                        }
4265                }
4266        }
4267
4268        if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
4269                if (data->br_stack) {
4270                        size_t size;
4271
4272                        size = data->br_stack->nr
4273                             * sizeof(struct perf_branch_entry);
4274
4275                        perf_output_put(handle, data->br_stack->nr);
4276                        perf_output_copy(handle, data->br_stack->entries, size);
4277                } else {
4278                        /*
4279                         * we always store at least the value of nr
4280                         */
4281                        u64 nr = 0;
4282                        perf_output_put(handle, nr);
4283                }
4284        }
4285
4286        if (sample_type & PERF_SAMPLE_REGS_USER) {
4287                u64 abi = data->regs_user.abi;
4288
4289                /*
4290                 * If there are no regs to dump, notice it through the
4291                 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
4292                 */
4293                perf_output_put(handle, abi);
4294
4295                if (abi) {
4296                        u64 mask = event->attr.sample_regs_user;
4297                        perf_output_sample_regs(handle,
4298                                                data->regs_user.regs,
4299                                                mask);
4300                }
4301        }
4302
4303        if (sample_type & PERF_SAMPLE_STACK_USER)
4304                perf_output_sample_ustack(handle,
4305                                          data->stack_user_size,
4306                                          data->regs_user.regs);
4307
4308        if (sample_type & PERF_SAMPLE_WEIGHT)
4309                perf_output_put(handle, data->weight);
4310
4311        if (sample_type & PERF_SAMPLE_DATA_SRC)
4312                perf_output_put(handle, data->data_src.val);
4313}
4314
4315void perf_prepare_sample(struct perf_event_header *header,
4316                         struct perf_sample_data *data,
4317                         struct perf_event *event,
4318                         struct pt_regs *regs)
4319{
4320        u64 sample_type = event->attr.sample_type;
4321
4322        header->type = PERF_RECORD_SAMPLE;
4323        header->size = sizeof(*header) + event->header_size;
4324
4325        header->misc = 0;
4326        header->misc |= perf_misc_flags(regs);
4327
4328        __perf_event_header__init_id(header, data, event);
4329
4330        if (sample_type & PERF_SAMPLE_IP)
4331                data->ip = perf_instruction_pointer(regs);
4332
4333        if (sample_type & PERF_SAMPLE_CALLCHAIN) {
4334                int size = 1;
4335
4336                data->callchain = perf_callchain(event, regs);
4337
4338                if (data->callchain)
4339                        size += data->callchain->nr;
4340
4341                header->size += size * sizeof(u64);
4342        }
4343
4344        if (sample_type & PERF_SAMPLE_RAW) {
4345                int size = sizeof(u32);
4346
4347                if (data->raw)
4348                        size += data->raw->size;
4349                else
4350                        size += sizeof(u32);
4351
4352                WARN_ON_ONCE(size & (sizeof(u64)-1));
4353                header->size += size;
4354        }
4355
4356        if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
4357                int size = sizeof(u64); /* nr */
4358                if (data->br_stack) {
4359                        size += data->br_stack->nr
4360                              * sizeof(struct perf_branch_entry);
4361                }
4362                header->size += size;
4363        }
4364
4365        if (sample_type & PERF_SAMPLE_REGS_USER) {
4366                /* regs dump ABI info */
4367                int size = sizeof(u64);
4368
4369                perf_sample_regs_user(&data->regs_user, regs);
4370
4371                if (data->regs_user.regs) {
4372                        u64 mask = event->attr.sample_regs_user;
4373                        size += hweight64(mask) * sizeof(u64);
4374                }
4375
4376                header->size += size;
4377        }
4378
4379        if (sample_type & PERF_SAMPLE_STACK_USER) {
4380                /*
4381                 * Either the PERF_SAMPLE_STACK_USER bit needs to always be
4382                 * processed last, or an additional check must be added
4383                 * whenever a new sample type is added, because we could eat
4384                 * up the rest of the sample size.
4385                 */
4386                struct perf_regs_user *uregs = &data->regs_user;
4387                u16 stack_size = event->attr.sample_stack_user;
4388                u16 size = sizeof(u64);
4389
4390                if (!uregs->abi)
4391                        perf_sample_regs_user(uregs, regs);
4392
4393                stack_size = perf_sample_ustack_size(stack_size, header->size,
4394                                                     uregs->regs);
4395
4396                /*
4397                 * If there is something to dump, add space for the dump
4398                 * itself and for the field that tells the dynamic size,
4399                 * which is how many bytes were actually dumped.
4400                 */
4401                if (stack_size)
4402                        size += sizeof(u64) + stack_size;
4403
4404                data->stack_user_size = stack_size;
4405                header->size += size;
4406        }
4407}
4408
4409static void perf_event_output(struct perf_event *event,
4410                                struct perf_sample_data *data,
4411                                struct pt_regs *regs)
4412{
4413        struct perf_output_handle handle;
4414        struct perf_event_header header;
4415
4416        /* protect the callchain buffers */
4417        rcu_read_lock();
4418
4419        perf_prepare_sample(&header, data, event, regs);
4420
4421        if (perf_output_begin(&handle, event, header.size))
4422                goto exit;
4423
4424        perf_output_sample(&handle, &header, data, event);
4425
4426        perf_output_end(&handle);
4427
4428exit:
4429        rcu_read_unlock();
4430}
4431
4432/*
4433 * read event_id
4434 */
4435
4436struct perf_read_event {
4437        struct perf_event_header        header;
4438
4439        u32                             pid;
4440        u32                             tid;
4441};
4442
4443static void
4444perf_event_read_event(struct perf_event *event,
4445                        struct task_struct *task)
4446{
4447        struct perf_output_handle handle;
4448        struct perf_sample_data sample;
4449        struct perf_read_event read_event = {
4450                .header = {
4451                        .type = PERF_RECORD_READ,
4452                        .misc = 0,
4453                        .size = sizeof(read_event) + event->read_size,
4454                },
4455                .pid = perf_event_pid(event, task),
4456                .tid = perf_event_tid(event, task),
4457        };
4458        int ret;
4459
4460        perf_event_header__init_id(&read_event.header, &sample, event);
4461        ret = perf_output_begin(&handle, event, read_event.header.size);
4462        if (ret)
4463                return;
4464
4465        perf_output_put(&handle, read_event);
4466        perf_output_read(&handle, event);
4467        perf_event__output_id_sample(event, &handle, &sample);
4468
4469        perf_output_end(&handle);
4470}
4471
4472typedef int  (perf_event_aux_match_cb)(struct perf_event *event, void *data);
4473typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data);
4474
4475static void
4476perf_event_aux_ctx(struct perf_event_context *ctx,
4477                   perf_event_aux_match_cb match,
4478                   perf_event_aux_output_cb output,
4479                   void *data)
4480{
4481        struct perf_event *event;
4482
4483        list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
4484                if (event->state < PERF_EVENT_STATE_INACTIVE)
4485                        continue;
4486                if (!event_filter_match(event))
4487                        continue;
4488                if (match(event, data))
4489                        output(event, data);
4490        }
4491}
4492
4493static void
4494perf_event_aux(perf_event_aux_match_cb match,
4495               perf_event_aux_output_cb output,
4496               void *data,
4497               struct perf_event_context *task_ctx)
4498{
4499        struct perf_cpu_context *cpuctx;
4500        struct perf_event_context *ctx;
4501        struct pmu *pmu;
4502        int ctxn;
4503
4504        rcu_read_lock();
4505        list_for_each_entry_rcu(pmu, &pmus, entry) {
4506                cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
4507                if (cpuctx->unique_pmu != pmu)
4508                        goto next;
4509                perf_event_aux_ctx(&cpuctx->ctx, match, output, data);
4510                if (task_ctx)
4511                        goto next;
4512                ctxn = pmu->task_ctx_nr;
4513                if (ctxn < 0)
4514                        goto next;
4515                ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
4516                if (ctx)
4517                        perf_event_aux_ctx(ctx, match, output, data);
4518next:
4519                put_cpu_ptr(pmu->pmu_cpu_context);
4520        }
4521
4522        if (task_ctx) {
4523                preempt_disable();
4524                perf_event_aux_ctx(task_ctx, match, output, data);
4525                preempt_enable();
4526        }
4527        rcu_read_unlock();
4528}
4529
4530/*
4531 * task tracking -- fork/exit
4532 *
4533 * enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task
4534 */
4535
4536struct perf_task_event {
4537        struct task_struct              *task;
4538        struct perf_event_context       *task_ctx;
4539
4540        struct {
4541                struct perf_event_header        header;
4542
4543                u32                             pid;
4544                u32                             ppid;
4545                u32                             tid;
4546                u32                             ptid;
4547                u64                             time;
4548        } event_id;
4549};
4550
4551static void perf_event_task_output(struct perf_event *event,
4552                                   void *data)
4553{
4554        struct perf_task_event *task_event = data;
4555        struct perf_output_handle handle;
4556        struct perf_sample_data sample;
4557        struct task_struct *task = task_event->task;
4558        int ret, size = task_event->event_id.header.size;
4559
4560        perf_event_header__init_id(&task_event->event_id.header, &sample, event);
4561
4562        ret = perf_output_begin(&handle, event,
4563                                task_event->event_id.header.size);
4564        if (ret)
4565                goto out;
4566
4567        task_event->event_id.pid = perf_event_pid(event, task);
4568        task_event->event_id.ppid = perf_event_pid(event, current);
4569
4570        task_event->event_id.tid = perf_event_tid(event, task);
4571        task_event->event_id.ptid = perf_event_tid(event, current);
4572
4573        perf_output_put(&handle, task_event->event_id);
4574
4575        perf_event__output_id_sample(event, &handle, &sample);
4576
4577        perf_output_end(&handle);
4578out:
4579        task_event->event_id.header.size = size;
4580}
4581
4582static int perf_event_task_match(struct perf_event *event,
4583                                 void *data __maybe_unused)
4584{
4585        return event->attr.comm || event->attr.mmap ||
4586               event->attr.mmap_data || event->attr.task;
4587}
4588
4589static void perf_event_task(struct task_struct *task,
4590                              struct perf_event_context *task_ctx,
4591                              int new)
4592{
4593        struct perf_task_event task_event;
4594
4595        if (!atomic_read(&nr_comm_events) &&
4596            !atomic_read(&nr_mmap_events) &&
4597            !atomic_read(&nr_task_events))
4598                return;
4599
4600        task_event = (struct perf_task_event){
4601                .task     = task,
4602                .task_ctx = task_ctx,
4603                .event_id    = {
4604                        .header = {
4605                                .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
4606                                .misc = 0,
4607                                .size = sizeof(task_event.event_id),
4608                        },
4609                        /* .pid  */
4610                        /* .ppid */
4611                        /* .tid  */
4612                        /* .ptid */
4613                        .time = perf_clock(),
4614                },
4615        };
4616
4617        perf_event_aux(perf_event_task_match,
4618                       perf_event_task_output,
4619                       &task_event,
4620                       task_ctx);
4621}
4622
4623void perf_event_fork(struct task_struct *task)
4624{
4625        perf_event_task(task, NULL, 1);
4626}
4627
4628/*
4629 * comm tracking
4630 */
4631
4632struct perf_comm_event {
4633        struct task_struct      *task;
4634        char                    *comm;
4635        int                     comm_size;
4636
4637        struct {
4638                struct perf_event_header        header;
4639
4640                u32                             pid;
4641                u32                             tid;
4642        } event_id;
4643};
4644
4645static void perf_event_comm_output(struct perf_event *event,
4646                                   void *data)
4647{
4648        struct perf_comm_event *comm_event = data;
4649        struct perf_output_handle handle;
4650        struct perf_sample_data sample;
4651        int size = comm_event->event_id.header.size;
4652        int ret;
4653
4654        perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
4655        ret = perf_output_begin(&handle, event,
4656                                comm_event->event_id.header.size);
4657
4658        if (ret)
4659                goto out;
4660
4661        comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
4662        comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
4663
4664        perf_output_put(&handle, comm_event->event_id);
4665        __output_copy(&handle, comm_event->comm,
4666                                   comm_event->comm_size);
4667
4668        perf_event__output_id_sample(event, &handle, &sample);
4669
4670        perf_output_end(&handle);
4671out:
4672        comm_event->event_id.header.size = size;
4673}
4674
4675static int perf_event_comm_match(struct perf_event *event,
4676                                 void *data __maybe_unused)
4677{
4678        return event->attr.comm;
4679}
4680
4681static void perf_event_comm_event(struct perf_comm_event *comm_event)
4682{
4683        char comm[TASK_COMM_LEN];
4684        unsigned int size;
4685
4686        memset(comm, 0, sizeof(comm));
4687        strlcpy(comm, comm_event->task->comm, sizeof(comm));
4688        size = ALIGN(strlen(comm)+1, sizeof(u64));
4689
4690        comm_event->comm = comm;
4691        comm_event->comm_size = size;
4692
4693        comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
4694
4695        perf_event_aux(perf_event_comm_match,
4696                       perf_event_comm_output,
4697                       comm_event,
4698                       NULL);
4699}
4700
4701void perf_event_comm(struct task_struct *task)
4702{
4703        struct perf_comm_event comm_event;
4704        struct perf_event_context *ctx;
4705        int ctxn;
4706
4707        rcu_read_lock();
4708        for_each_task_context_nr(ctxn) {
4709                ctx = task->perf_event_ctxp[ctxn];
4710                if (!ctx)
4711                        continue;
4712
4713                perf_event_enable_on_exec(ctx);
4714        }
4715        rcu_read_unlock();
4716
4717        if (!atomic_read(&nr_comm_events))
4718                return;
4719
4720        comm_event = (struct perf_comm_event){
4721                .task   = task,
4722                /* .comm      */
4723                /* .comm_size */
4724                .event_id  = {
4725                        .header = {
4726                                .type = PERF_RECORD_COMM,
4727                                .misc = 0,
4728                                /* .size */
4729                        },
4730                        /* .pid */
4731                        /* .tid */
4732                },
4733        };
4734
4735        perf_event_comm_event(&comm_event);
4736}
4737
4738/*
4739 * mmap tracking
4740 */
4741
4742struct perf_mmap_event {
4743        struct vm_area_struct   *vma;
4744
4745        const char              *file_name;
4746        int                     file_size;
4747
4748        struct {
4749                struct perf_event_header        header;
4750
4751                u32                             pid;
4752                u32                             tid;
4753                u64                             start;
4754                u64                             len;
4755                u64                             pgoff;
4756        } event_id;
4757};
4758
4759static void perf_event_mmap_output(struct perf_event *event,
4760                                   void *data)
4761{
4762        struct perf_mmap_event *mmap_event = data;
4763        struct perf_output_handle handle;
4764        struct perf_sample_data sample;
4765        int size = mmap_event->event_id.header.size;
4766        int ret;
4767
4768        perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
4769        ret = perf_output_begin(&handle, event,
4770                                mmap_event->event_id.header.size);
4771        if (ret)
4772                goto out;
4773
4774        mmap_event->event_id.pid = perf_event_pid(event, current);
4775        mmap_event->event_id.tid = perf_event_tid(event, current);
4776
4777        perf_output_put(&handle, mmap_event->event_id);
4778        __output_copy(&handle, mmap_event->file_name,
4779                                   mmap_event->file_size);
4780
4781        perf_event__output_id_sample(event, &handle, &sample);
4782
4783        perf_output_end(&handle);
4784out:
4785        mmap_event->event_id.header.size = size;
4786}
4787
4788static int perf_event_mmap_match(struct perf_event *event,
4789                                 void *data)
4790{
4791        struct perf_mmap_event *mmap_event = data;
4792        struct vm_area_struct *vma = mmap_event->vma;
4793        int executable = vma->vm_flags & VM_EXEC;
4794
4795        return (!executable && event->attr.mmap_data) ||
4796               (executable && event->attr.mmap);
4797}
4798
4799static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
4800{
4801        struct vm_area_struct *vma = mmap_event->vma;
4802        struct file *file = vma->vm_file;
4803        unsigned int size;
4804        char tmp[16];
4805        char *buf = NULL;
4806        const char *name;
4807
4808        memset(tmp, 0, sizeof(tmp));
4809
4810        if (file) {
4811                /*
4812                 * d_path() works from the end of the buffer backwards, so we
4813                 * need to add enough zero bytes after the string to handle
4814                 * the 64bit alignment we do later.
4815                 */
4816                buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
4817                if (!buf) {
4818                        name = strncpy(tmp, "//enomem", sizeof(tmp));
4819                        goto got_name;
4820                }
4821                name = d_path(&file->f_path, buf, PATH_MAX);
4822                if (IS_ERR(name)) {
4823                        name = strncpy(tmp, "//toolong", sizeof(tmp));
4824                        goto got_name;
4825                }
4826        } else {
4827                if (arch_vma_name(mmap_event->vma)) {
4828                        name = strncpy(tmp, arch_vma_name(mmap_event->vma),
4829                                       sizeof(tmp) - 1);
4830                        tmp[sizeof(tmp) - 1] = '\0';
4831                        goto got_name;
4832                }
4833
4834                if (!vma->vm_mm) {
4835                        name = strncpy(tmp, "[vdso]", sizeof(tmp));
4836                        goto got_name;
4837                } else if (vma->vm_start <= vma->vm_mm->start_brk &&
4838                                vma->vm_end >= vma->vm_mm->brk) {
4839                        name = strncpy(tmp, "[heap]", sizeof(tmp));
4840                        goto got_name;
4841                } else if (vma->vm_start <= vma->vm_mm->start_stack &&
4842                                vma->vm_end >= vma->vm_mm->start_stack) {
4843                        name = strncpy(tmp, "[stack]", sizeof(tmp));
4844                        goto got_name;
4845                }
4846
4847                name = strncpy(tmp, "//anon", sizeof(tmp));
4848                goto got_name;
4849        }
4850
4851got_name:
4852        size = ALIGN(strlen(name)+1, sizeof(u64));
4853
4854        mmap_event->file_name = name;
4855        mmap_event->file_size = size;
4856
4857        if (!(vma->vm_flags & VM_EXEC))
4858                mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA;
4859
4860        mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
4861
4862        perf_event_aux(perf_event_mmap_match,
4863                       perf_event_mmap_output,
4864                       mmap_event,
4865                       NULL);
4866
4867        kfree(buf);
4868}
4869
4870void perf_event_mmap(struct vm_area_struct *vma)
4871{
4872        struct perf_mmap_event mmap_event;
4873
4874        if (!atomic_read(&nr_mmap_events))
4875                return;
4876
4877        mmap_event = (struct perf_mmap_event){
4878                .vma    = vma,
4879                /* .file_name */
4880                /* .file_size */
4881                .event_id  = {
4882                        .header = {
4883                                .type = PERF_RECORD_MMAP,
4884                                .misc = PERF_RECORD_MISC_USER,
4885                                /* .size */
4886                        },
4887                        /* .pid */
4888                        /* .tid */
4889                        .start  = vma->vm_start,
4890                        .len    = vma->vm_end - vma->vm_start,
4891                        .pgoff  = (u64)vma->vm_pgoff << PAGE_SHIFT,
4892                },
4893        };
4894
4895        perf_event_mmap_event(&mmap_event);
4896}
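
/*
 * Illustrative sketch (hypothetical helper): the mm code calls
 * perf_event_mmap() once a VMA is in place; nr_mmap_events keeps the common
 * case a cheap atomic_read() when nobody asked for mmap records.
 */
#if 0
static void example_vma_installed(struct vm_area_struct *vma)
{
        /* Emits a PERF_RECORD_MMAP to interested events, if any. */
        perf_event_mmap(vma);
}
#endif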
4897
4898/*
4899 * IRQ throttle logging
4900 */
4901
4902static void perf_log_throttle(struct perf_event *event, int enable)
4903{
4904        struct perf_output_handle handle;
4905        struct perf_sample_data sample;
4906        int ret;
4907
4908        struct {
4909                struct perf_event_header        header;
4910                u64                             time;
4911                u64                             id;
4912                u64                             stream_id;
4913        } throttle_event = {
4914                .header = {
4915                        .type = PERF_RECORD_THROTTLE,
4916                        .misc = 0,
4917                        .size = sizeof(throttle_event),
4918                },
4919                .time           = perf_clock(),
4920                .id             = primary_event_id(event),
4921                .stream_id      = event->id,
4922        };
4923
4924        if (enable)
4925                throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
4926
4927        perf_event_header__init_id(&throttle_event.header, &sample, event);
4928
4929        ret = perf_output_begin(&handle, event,
4930                                throttle_event.header.size);
4931        if (ret)
4932                return;
4933
4934        perf_output_put(&handle, throttle_event);
4935        perf_event__output_id_sample(event, &handle, &sample);
4936        perf_output_end(&handle);
4937}
4938
4939/*
4940 * Generic event overflow handling, sampling.
4941 */
4942
4943static int __perf_event_overflow(struct perf_event *event,
4944                                   int throttle, struct perf_sample_data *data,
4945                                   struct pt_regs *regs)
4946{
4947        int events = atomic_read(&event->event_limit);
4948        struct hw_perf_event *hwc = &event->hw;
4949        u64 seq;
4950        int ret = 0;
4951
4952        /*
4953         * Non-sampling counters might still use the PMI to fold short
4954         * hardware counters; ignore those.
4955         */
4956        if (unlikely(!is_sampling_event(event)))
4957                return 0;
4958
4959        seq = __this_cpu_read(perf_throttled_seq);
4960        if (seq != hwc->interrupts_seq) {
4961                hwc->interrupts_seq = seq;
4962                hwc->interrupts = 1;
4963        } else {
4964                hwc->interrupts++;
4965                if (unlikely(throttle
4966                             && hwc->interrupts >= max_samples_per_tick)) {
4967                        __this_cpu_inc(perf_throttled_count);
4968                        hwc->interrupts = MAX_INTERRUPTS;
4969                        perf_log_throttle(event, 0);
4970                        ret = 1;
4971                }
4972        }
4973
4974        if (event->attr.freq) {
4975                u64 now = perf_clock();
4976                s64 delta = now - hwc->freq_time_stamp;
4977
4978                hwc->freq_time_stamp = now;
4979
4980                if (delta > 0 && delta < 2*TICK_NSEC)
4981                        perf_adjust_period(event, delta, hwc->last_period, true);
4982        }
4983
4984        /*
4985         * XXX event_limit might not quite work as expected on inherited
4986         * events
4987         */
4988
4989        event->pending_kill = POLL_IN;
4990        if (events && atomic_dec_and_test(&event->event_limit)) {
4991                ret = 1;
4992                event->pending_kill = POLL_HUP;
4993                event->pending_disable = 1;
4994                irq_work_queue(&event->pending);
4995        }
4996
4997        if (event->overflow_handler)
4998                event->overflow_handler(event, data, regs);
4999        else
5000                perf_event_output(event, data, regs);
5001
5002        if (event->fasync && event->pending_kill) {
5003                event->pending_wakeup = 1;
5004                irq_work_queue(&event->pending);
5005        }
5006
5007        return ret;
5008}
5009
5010int perf_event_overflow(struct perf_event *event,
5011                          struct perf_sample_data *data,
5012                          struct pt_regs *regs)
5013{
5014        return __perf_event_overflow(event, 1, data, regs);
5015}
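
/*
 * Illustrative sketch (hypothetical names, arbitrary cpu and period): a
 * kernel user can supply its own overflow handler when creating a counter
 * with perf_event_create_kernel_counter(); it is then called above in place
 * of perf_event_output().
 */
#if 0
static void example_overflow_handler(struct perf_event *event,
                                     struct perf_sample_data *data,
                                     struct pt_regs *regs)
{
        pr_info("perf event %llu overflowed\n",
                (unsigned long long)event->id);
}

static struct perf_event *example_create_counter(void)
{
        struct perf_event_attr attr = {
                .type           = PERF_TYPE_SOFTWARE,
                .config         = PERF_COUNT_SW_CPU_CLOCK,
                .size           = sizeof(attr),
                .sample_period  = 1000000,      /* cpu-clock counts in ns */
        };

        return perf_event_create_kernel_counter(&attr, 0 /* cpu */, NULL,
                                                 example_overflow_handler,
                                                 NULL);
}
#endif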
5016
5017/*
5018 * Generic software event infrastructure
5019 */
5020
5021struct swevent_htable {
5022        struct swevent_hlist            *swevent_hlist;
5023        struct mutex                    hlist_mutex;
5024        int                             hlist_refcount;
5025
5026        /* Recursion avoidance in each context */
5027        int                             recursion[PERF_NR_CONTEXTS];
5028};
5029
5030static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
5031
5032/*
5033 * We directly increment event->count and keep a second value in
5034 * event->hw.period_left to count intervals. This value is kept
5035 * in the range [-sample_period, 0] so that we can use the sign
5036 * as the overflow trigger.
5037 */
5038
5039static u64 perf_swevent_set_period(struct perf_event *event)
5040{
5041        struct hw_perf_event *hwc = &event->hw;
5042        u64 period = hwc->last_period;
5043        u64 nr, offset;
5044        s64 old, val;
5045
5046        hwc->last_period = hwc->sample_period;
5047
5048again:
5049        old = val = local64_read(&hwc->period_left);
5050        if (val < 0)
5051                return 0;
5052
5053        nr = div64_u64(period + val, period);
5054        offset = nr * period;
5055        val -= offset;
5056        if (local64_cmpxchg(&hwc->period_left, old, val) != old)
5057                goto again;
5058
5059        return nr;
5060}
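
/*
 * Worked example (illustrative numbers): with sample_period == 100 and
 * period_left read back as +35 (events are added in chunks, so it can
 * overshoot zero), nr = (100 + 35) / 100 = 1 overflow is reported and
 * period_left is rewound to 35 - 100 = -65, i.e. the next overflow fires
 * after 65 more counts.
 */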
5061
5062static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
5063                                    struct perf_sample_data *data,
5064                                    struct pt_regs *regs)
5065{
5066        struct hw_perf_event *hwc = &event->hw;
5067        int throttle = 0;
5068
5069        if (!overflow)
5070                overflow = perf_swevent_set_period(event);
5071
5072        if (hwc->interrupts == MAX_INTERRUPTS)
5073                return;
5074
5075        for (; overflow; overflow--) {
5076                if (__perf_event_overflow(event, throttle,
5077                                            data, regs)) {
5078                        /*
5079                         * We inhibit the overflow from happening when
5080                         * hwc->interrupts == MAX_INTERRUPTS.
5081                         */
5082                        break;
5083                }
5084                throttle = 1;
5085        }
5086}
5087
5088static void perf_swevent_event(struct perf_event *event, u64 nr,
5089                               struct perf_sample_data *data,
5090                               struct pt_regs *regs)
5091{
5092        struct hw_perf_event *hwc = &event->hw;
5093
5094        local64_add(nr, &event->count);
5095
5096        if (!regs)
5097                return;
5098
5099        if (!is_sampling_event(event))
5100                return;
5101
5102        if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
5103                data->period = nr;
5104                return perf_swevent_overflow(event, 1, data, regs);
5105        } else
5106                data->period = event->hw.last_period;
5107
5108        if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
5109                return perf_swevent_overflow(event, 1, data, regs);
5110
5111        if (local64_add_negative(nr, &hwc->period_left))
5112                return;
5113
5114        perf_swevent_overflow(event, 0, data, regs);
5115}
5116
5117static int perf_exclude_event(struct perf_event *event,
5118                              struct pt_regs *regs)
5119{
5120        if (event->hw.state & PERF_HES_STOPPED)
5121                return 1;
5122
5123        if (regs) {
5124                if (event->attr.exclude_user && user_mode(regs))
5125                        return 1;
5126
5127                if (event->attr.exclude_kernel && !user_mode(regs))
5128                        return 1;
5129        }
5130
5131        return 0;
5132}
5133
5134static int perf_swevent_match(struct perf_event *event,
5135                                enum perf_type_id type,
5136                                u32 event_id,
5137                                struct perf_sample_data *data,
5138                                struct pt_regs *regs)
5139{
5140        if (event->attr.type != type)
5141                return 0;
5142
5143        if (event->attr.config != event_id)
5144                return 0;
5145
5146        if (perf_exclude_event(event, regs))
5147                return 0;
5148
5149        return 1;
5150}
5151
5152static inline u64 swevent_hash(u64 type, u32 event_id)
5153{
5154        u64 val = event_id | (type << 32);
5155
5156        return hash_64(val, SWEVENT_HLIST_BITS);
5157}
5158
5159static inline struct hlist_head *
5160__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
5161{
5162        u64 hash = swevent_hash(type, event_id);
5163
5164        return &hlist->heads[hash];
5165}
5166
5167/* For the read side: lookup at event trigger time */
5168static inline struct hlist_head *
5169find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
5170{
5171        struct swevent_hlist *hlist;
5172
5173        hlist = rcu_dereference(swhash->swevent_hlist);
5174        if (!hlist)
5175                return NULL;
5176
5177        return __find_swevent_head(hlist, type, event_id);
5178}
5179
5180/* For the event head insertion and removal in the hlist */
5181static inline struct hlist_head *
5182find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
5183{
5184        struct swevent_hlist *hlist;
5185        u32 event_id = event->attr.config;
5186        u64 type = event->attr.type;
5187
5188        /*
5189         * Event scheduling is always serialized against hlist allocation
5190         * and release, which makes the protected version suitable here;
5191         * the context lock guarantees that serialization.
5192         */
5193        hlist = rcu_dereference_protected(swhash->swevent_hlist,
5194                                          lockdep_is_held(&event->ctx->lock));
5195        if (!hlist)
5196                return NULL;
5197
5198        return __find_swevent_head(hlist, type, event_id);
5199}
5200
5201static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
5202                                    u64 nr,
5203                                    struct perf_sample_data *data,
5204                                    struct pt_regs *regs)
5205{
5206        struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
5207        struct perf_event *event;
5208        struct hlist_head *head;
5209
5210        rcu_read_lock();
5211        head = find_swevent_head_rcu(swhash, type, event_id);
5212        if (!head)
5213                goto end;
5214
5215        hlist_for_each_entry_rcu(event, head, hlist_entry) {
5216                if (perf_swevent_match(event, type, event_id, data, regs))
5217                        perf_swevent_event(event, nr, data, regs);
5218        }
5219end:
5220        rcu_read_unlock();
5221}
5222
5223int perf_swevent_get_recursion_context(void)
5224{
5225        struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
5226
5227        return get_recursion_context(swhash->recursion);
5228}
5229EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
5230
5231inline void perf_swevent_put_recursion_context(int rctx)
5232{
5233        struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
5234
5235        put_recursion_context(swhash->recursion, rctx);
5236}
5237
5238void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
5239{
5240        struct perf_sample_data data;
5241        int rctx;
5242
5243        preempt_disable_notrace();
5244        rctx = perf_swevent_get_recursion_context();
5245        if (rctx < 0)
5246                return;
5247
5248        perf_sample_data_init(&data, addr, 0);
5249
5250        do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
5251
5252        perf_swevent_put_recursion_context(rctx);
5253        preempt_enable_notrace();
5254}
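
/*
 * Illustrative sketch (hypothetical call site): kernel code normally reaches
 * the function above through the perf_sw_event() wrapper from
 * <linux/perf_event.h>, e.g. from a fault path.
 */
#if 0
static void example_count_fault(struct pt_regs *regs, unsigned long address)
{
        /* Count one page-fault software event at this address. */
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
}
#endif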
5255
5256static void perf_swevent_read(struct perf_event *event)
5257{
5258}
5259
5260static int perf_swevent_add(struct perf_event *event, int flags)
5261{
5262        struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
5263        struct hw_perf_event *hwc = &event->hw;
5264        struct hlist_head *head;
5265
5266        if (is_sampling_event(event)) {
5267                hwc->last_period = hwc->sample_period;
5268                perf_swevent_set_period(event);
5269        }
5270
5271        hwc->state = !(flags & PERF_EF_START);
5272
5273        head = find_swevent_head(swhash, event);
5274        if (WARN_ON_ONCE(!head))
5275                return -EINVAL;
5276
5277        hlist_add_head_rcu(&event->hlist_entry, head);
5278
5279        return 0;
5280}
5281
5282static void perf_swevent_del(struct perf_event *event, int flags)
5283{
5284        hlist_del_rcu(&event->hlist_entry);
5285}
5286
5287static void perf_swevent_start(struct perf_event *event, int flags)
5288{
5289        event->hw.state = 0;
5290}
5291
5292static void perf_swevent_stop(struct perf_event *event, int flags)
5293{
5294        event->hw.state = PERF_HES_STOPPED;
5295}
5296
5297/* Deref the hlist from the update side */
5298static inline struct swevent_hlist *
5299swevent_hlist_deref(struct swevent_htable *swhash)
5300{
5301        return rcu_dereference_protected(swhash->swevent_hlist,
5302                                         lockdep_is_held(&swhash->hlist_mutex));
5303}
5304
5305static void swevent_hlist_release(struct swevent_htable *swhash)
5306{
5307        struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
5308
5309        if (!hlist)
5310                return;
5311
5312        rcu_assign_pointer(swhash->swevent_hlist, NULL);
5313        kfree_rcu(hlist, rcu_head);
5314}
5315
5316static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
5317{
5318        struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
5319
5320        mutex_lock(&swhash->hlist_mutex);
5321
5322        if (!--swhash->hlist_refcount)
5323                swevent_hlist_release(swhash);
5324
5325        mutex_unlock(&swhash->hlist_mutex);
5326}
5327
5328static void swevent_hlist_put(struct perf_event *event)
5329{
5330        int cpu;
5331
5332        if (event->cpu != -1) {
5333                swevent_hlist_put_cpu(event, event->cpu);
5334                return;
5335        }
5336
5337        for_each_possible_cpu(cpu)
5338                swevent_hlist_put_cpu(event, cpu);
5339}
5340
5341static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
5342{
5343        struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
5344        int err = 0;
5345
5346        mutex_lock(&swhash->hlist_mutex);
5347
5348        if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
5349                struct swevent_hlist *hlist;
5350
5351                hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
5352                if (!hlist) {
5353                        err = -ENOMEM;
5354                        goto exit;
5355                }
5356                rcu_assign_pointer(swhash->swevent_hlist, hlist);
5357        }
5358        swhash->hlist_refcount++;
5359exit:
5360        mutex_unlock(&swhash->hlist_mutex);
5361
5362        return err;
5363}
5364
5365static int swevent_hlist_get(struct perf_event *event)
5366{
5367        int err;
5368        int cpu, failed_cpu;
5369
5370        if (event->cpu != -1)
5371                return swevent_hlist_get_cpu(event, event->cpu);
5372
5373        get_online_cpus();
5374        for_each_possible_cpu(cpu) {
5375                err = swevent_hlist_get_cpu(event, cpu);
5376                if (err) {
5377                        failed_cpu = cpu;
5378                        goto fail;
5379                }
5380        }
5381        put_online_cpus();
5382
5383        return 0;
5384fail:
5385        for_each_possible_cpu(cpu) {
5386                if (cpu == failed_cpu)
5387                        break;
5388                swevent_hlist_put_cpu(event, cpu);
5389        }
5390
5391        put_online_cpus();
5392        return err;
5393}
5394
5395struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
5396
5397static void sw_perf_event_destroy(struct perf_event *event)
5398{
5399        u64 event_id = event->attr.config;
5400
5401        WARN_ON(event->parent);
5402
5403        static_key_slow_dec(&perf_swevent_enabled[event_id]);
5404        swevent_hlist_put(event);
5405}
5406
5407static int perf_swevent_init(struct perf_event *event)
5408{
5409        u64 event_id = event->attr.config;
5410
5411        if (event->attr.type != PERF_TYPE_SOFTWARE)
5412                return -ENOENT;
5413
5414        /*
5415         * no branch sampling for software events
5416         */
5417        if (has_branch_stack(event))
5418                return -EOPNOTSUPP;
5419
5420        switch (event_id) {
5421        case PERF_COUNT_SW_CPU_CLOCK:
5422        case PERF_COUNT_SW_TASK_CLOCK:
5423                return -ENOENT;
5424
5425        default:
5426                break;
5427        }
5428
5429        if (event_id >= PERF_COUNT_SW_MAX)
5430                return -ENOENT;
5431
5432        if (!event->parent) {
5433                int err;
5434
5435                err = swevent_hlist_get(event);
5436                if (err)
5437                        return err;
5438
5439                static_key_slow_inc(&perf_swevent_enabled[event_id]);
5440                event->destroy = sw_perf_event_destroy;
5441        }
5442
5443        return 0;
5444}
5445
5446static int perf_swevent_event_idx(struct perf_event *event)
5447{
5448        return 0;
5449}
5450
5451static struct pmu perf_swevent = {
5452        .task_ctx_nr    = perf_sw_context,
5453
5454        .event_init     = perf_swevent_init,
5455        .add            = perf_swevent_add,
5456        .del            = perf_swevent_del,
5457        .start          = perf_swevent_start,
5458        .stop           = perf_swevent_stop,
5459        .read           = perf_swevent_read,
5460
5461        .event_idx      = perf_swevent_event_idx,
5462};
5463
5464#ifdef CONFIG_EVENT_TRACING
5465
5466static int perf_tp_filter_match(struct perf_event *event,
5467                                struct perf_sample_data *data)
5468{
5469        void *record = data->raw->data;
5470
5471        if (likely(!event->filter) || filter_match_preds(event->filter, record))
5472                return 1;
5473        return 0;
5474}
5475
5476static int perf_tp_event_match(struct perf_event *event,
5477                                struct perf_sample_data *data,
5478                                struct pt_regs *regs)
5479{
5480        if (event->hw.state & PERF_HES_STOPPED)
5481                return 0;
5482        /*
5483         * All tracepoints are from kernel-space.
5484         */
5485        if (event->attr.exclude_kernel)
5486                return 0;
5487
5488        if (!perf_tp_filter_match(event, data))
5489                return 0;
5490
5491        return 1;
5492}
5493
5494void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
5495                   struct pt_regs *regs, struct hlist_head *head, int rctx,
5496                   struct task_struct *task)
5497{
5498        struct perf_sample_data data;
5499        struct perf_event *event;
5500
5501        struct perf_raw_record raw = {
5502                .size = entry_size,
5503                .data = record,
5504        };
5505
5506        perf_sample_data_init(&data, addr, 0);
5507        data.raw = &raw;
5508
5509        hlist_for_each_entry_rcu(event, head, hlist_entry) {
5510                if (perf_tp_event_match(event, &data, regs))
5511                        perf_swevent_event(event, count, &data, regs);
5512        }
5513
5514        /*
5515         * If we were given a target task, also iterate its context
5516         * and deliver this event there.
5517         */
5518        if (task && task != current) {
5519                struct perf_event_context *ctx;
5520                struct trace_entry *entry = record;
5521
5522                rcu_read_lock();
5523                ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]);
5524                if (!ctx)
5525                        goto unlock;
5526
5527                list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
5528                        if (event->attr.type != PERF_TYPE_TRACEPOINT)
5529                                continue;
5530                        if (event->attr.config != entry->type)
5531                                continue;
5532                        if (perf_tp_event_match(event, &data, regs))
5533                                perf_swevent_event(event, count, &data, regs);
5534                }
5535unlock:
5536                rcu_read_unlock();
5537        }
5538
5539        perf_swevent_put_recursion_context(rctx);
5540}
5541EXPORT_SYMBOL_GPL(perf_tp_event);
5542
5543static void tp_perf_event_destroy(struct perf_event *event)
5544{
5545        perf_trace_destroy(event);
5546}
5547
5548static int perf_tp_event_init(struct perf_event *event)
5549{
5550        int err;
5551
5552        if (event->attr.type != PERF_TYPE_TRACEPOINT)
5553                return -ENOENT;
5554
5555        /*
5556         * no branch sampling for tracepoint events
5557         */
5558        if (has_branch_stack(event))
5559                return -EOPNOTSUPP;
5560
5561        err = perf_trace_init(event);
5562        if (err)
5563                return err;
5564
5565        event->destroy = tp_perf_event_destroy;
5566
5567        return 0;
5568}
5569
5570static struct pmu perf_tracepoint = {
5571        .task_ctx_nr    = perf_sw_context,
5572
5573        .event_init     = perf_tp_event_init,
5574        .add            = perf_trace_add,
5575        .del            = perf_trace_del,
5576        .start          = perf_swevent_start,
5577        .stop           = perf_swevent_stop,
5578        .read           = perf_swevent_read,
5579
5580        .event_idx      = perf_swevent_event_idx,
5581};
5582
5583static inline void perf_tp_register(void)
5584{
5585        perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
5586}
5587
5588static int perf_event_set_filter(struct perf_event *event, void __user *arg)
5589{
5590        char *filter_str;
5591        int ret;
5592
5593        if (event->attr.type != PERF_TYPE_TRACEPOINT)
5594                return -EINVAL;
5595
5596        filter_str = strndup_user(arg, PAGE_SIZE);
5597        if (IS_ERR(filter_str))
5598                return PTR_ERR(filter_str);
5599
5600        ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
5601
5602        kfree(filter_str);
5603        return ret;
5604}
5605
5606static void perf_event_free_filter(struct perf_event *event)
5607{
5608        ftrace_profile_free_filter(event);
5609}
5610
5611#else
5612
5613static inline void perf_tp_register(void)
5614{
5615}
5616
5617static int perf_event_set_filter(struct perf_event *event, void __user *arg)
5618{
5619        return -ENOENT;
5620}
5621
5622static void perf_event_free_filter(struct perf_event *event)
5623{
5624}
5625
5626#endif /* CONFIG_EVENT_TRACING */
5627
5628#ifdef CONFIG_HAVE_HW_BREAKPOINT
5629void perf_bp_event(struct perf_event *bp, void *data)
5630{
5631        struct perf_sample_data sample;
5632        struct pt_regs *regs = data;
5633
5634        perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
5635
5636        if (!bp->hw.state && !perf_exclude_event(bp, regs))
5637                perf_swevent_event(bp, 1, &sample, regs);
5638}
5639#endif
5640
5641/*
5642 * hrtimer based swevent callback
5643 */
5644
5645static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
5646{
5647        enum hrtimer_restart ret = HRTIMER_RESTART;
5648        struct perf_sample_data data;
5649        struct pt_regs *regs;
5650        struct perf_event *event;
5651        u64 period;
5652
5653        event = container_of(hrtimer, struct perf_event, hw.hrtimer);
5654
5655        if (event->state != PERF_EVENT_STATE_ACTIVE)
5656                return HRTIMER_NORESTART;
5657
5658        event->pmu->read(event);
5659
5660        perf_sample_data_init(&data, 0, event->hw.last_period);
5661        regs = get_irq_regs();
5662
5663        if (regs && !perf_exclude_event(event, regs)) {
5664                if (!(event->attr.exclude_idle && is_idle_task(current)))
5665                        if (__perf_event_overflow(event, 1, &data, regs))
5666                                ret = HRTIMER_NORESTART;
5667        }
5668
5669        period = max_t(u64, 10000, event->hw.sample_period);
5670        hrtimer_forward_now(hrtimer, ns_to_ktime(period));
5671
5672        return ret;
5673}
5674
5675static void perf_swevent_start_hrtimer(struct perf_event *event)
5676{
5677        struct hw_perf_event *hwc = &event->hw;
5678        s64 period;
5679
5680        if (!is_sampling_event(event))
5681                return;
5682
5683        period = local64_read(&hwc->period_left);
5684        if (period) {
5685                if (period < 0)
5686                        period = 10000;
5687
5688                local64_set(&hwc->period_left, 0);
5689        } else {
5690                period = max_t(u64, 10000, hwc->sample_period);
5691        }
5692        __hrtimer_start_range_ns(&hwc->hrtimer,
5693                                ns_to_ktime(period), 0,
5694                                HRTIMER_MODE_REL_PINNED, 0);
5695}
5696
5697static void perf_swevent_cancel_hrtimer(struct perf_event *event)
5698{
5699        struct hw_perf_event *hwc = &event->hw;
5700
5701        if (is_sampling_event(event)) {
5702                ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
5703                local64_set(&hwc->period_left, ktime_to_ns(remaining));
5704
5705                hrtimer_cancel(&hwc->hrtimer);
5706        }
5707}
5708
5709static void perf_swevent_init_hrtimer(struct perf_event *event)
5710{
5711        struct hw_perf_event *hwc = &event->hw;
5712
5713        if (!is_sampling_event(event))
5714                return;
5715
5716        hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
5717        hwc->hrtimer.function = perf_swevent_hrtimer;
5718
5719        /*
5720         * Since hrtimers have a fixed rate, we can do a static freq->period
5721         * mapping and avoid the period-adjustment feedback loop entirely.
5722         */
5723        if (event->attr.freq) {
5724                long freq = event->attr.sample_freq;
5725
5726                event->attr.sample_period = NSEC_PER_SEC / freq;
5727                hwc->sample_period = event->attr.sample_period;
5728                local64_set(&hwc->period_left, hwc->sample_period);
5729                hwc->last_period = hwc->sample_period;
5730                event->attr.freq = 0;
5731        }
5732}
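
/*
 * Worked example for the mapping above (illustrative numbers): a request of
 * sample_freq == 4000 becomes a fixed sample_period of
 * NSEC_PER_SEC / 4000 == 250000 ns per hrtimer expiry.
 */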
5733
5734/*
5735 * Software event: cpu wall time clock
5736 */
5737
5738static void cpu_clock_event_update(struct perf_event *event)
5739{
5740        s64 prev;
5741        u64 now;
5742
5743        now = local_clock();
5744        prev = local64_xchg(&event->hw.prev_count, now);
5745        local64_add(now - prev, &event->count);
5746}
5747
5748static void cpu_clock_event_start(struct perf_event *event, int flags)
5749{
5750        local64_set(&event->hw.prev_count, local_clock());
5751        perf_swevent_start_hrtimer(event);
5752}
5753
5754static void cpu_clock_event_stop(struct perf_event *event, int flags)
5755{
5756        perf_swevent_cancel_hrtimer(event);
5757        cpu_clock_event_update(event);
5758}
5759
5760static int cpu_clock_event_add(struct perf_event *event, int flags)
5761{
5762        if (flags & PERF_EF_START)
5763                cpu_clock_event_start(event, flags);
5764
5765        return 0;
5766}
5767
5768static void cpu_clock_event_del(struct perf_event *event, int flags)
5769{
5770        cpu_clock_event_stop(event, flags);
5771}
5772
5773static void cpu_clock_event_read(struct perf_event *event)
5774{
5775        cpu_clock_event_update(event);
5776}
5777
5778static int cpu_clock_event_init(struct perf_event *event)
5779{
5780        if (event->attr.type != PERF_TYPE_SOFTWARE)
5781                return -ENOENT;
5782
5783        if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
5784                return -ENOENT;
5785
5786        /*
5787         * no branch sampling for software events
5788         */
5789        if (has_branch_stack(event))
5790                return -EOPNOTSUPP;
5791
5792        perf_swevent_init_hrtimer(event);
5793
5794        return 0;
5795}
5796
5797static struct pmu perf_cpu_clock = {
5798        .task_ctx_nr    = perf_sw_context,
5799
5800        .event_init     = cpu_clock_event_init,
5801        .add            = cpu_clock_event_add,
5802        .del            = cpu_clock_event_del,
5803        .start          = cpu_clock_event_start,
5804        .stop           = cpu_clock_event_stop,
5805        .read           = cpu_clock_event_read,
5806
5807        .event_idx      = perf_swevent_event_idx,
5808};
5809
5810/*
5811 * Software event: task time clock
5812 */
5813
5814static void task_clock_event_update(struct perf_event *event, u64 now)
5815{
5816        u64 prev;
5817        s64 delta;
5818
5819        prev = local64_xchg(&event->hw.prev_count, now);
5820        delta = now - prev;
5821        local64_add(delta, &event->count);
5822}
5823
5824static void task_clock_event_start(struct perf_event *event, int flags)
5825{
5826        local64_set(&event->hw.prev_count, event->ctx->time);
5827        perf_swevent_start_hrtimer(event);
5828}
5829
5830static void task_clock_event_stop(struct perf_event *event, int flags)
5831{
5832        perf_swevent_cancel_hrtimer(event);
5833        task_clock_event_update(event, event->ctx->time);
5834}
5835
5836static int task_clock_event_add(struct perf_event *event, int flags)
5837{
5838        if (flags & PERF_EF_START)
5839                task_clock_event_start(event, flags);
5840
5841        return 0;
5842}
5843
5844static void task_clock_event_del(struct perf_event *event, int flags)
5845{
5846        task_clock_event_stop(event, PERF_EF_UPDATE);
5847}
5848
5849static void task_clock_event_read(struct perf_event *event)
5850{
5851        u64 now = perf_clock();
5852        u64 delta = now - event->ctx->timestamp;
5853        u64 time = event->ctx->time + delta;
5854
5855        task_clock_event_update(event, time);
5856}
5857
5858static int task_clock_event_init(struct perf_event *event)
5859{
5860        if (event->attr.type != PERF_TYPE_SOFTWARE)
5861                return -ENOENT;
5862
5863        if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
5864                return -ENOENT;
5865
5866        /*
5867         * no branch sampling for software events
5868         */
5869        if (has_branch_stack(event))
5870                return -EOPNOTSUPP;
5871
5872        perf_swevent_init_hrtimer(event);
5873
5874        return 0;
5875}
5876
5877static struct pmu perf_task_clock = {
5878        .task_ctx_nr    = perf_sw_context,
5879
5880        .event_init     = task_clock_event_init,
5881        .add            = task_clock_event_add,
5882        .del            = task_clock_event_del,
5883        .start          = task_clock_event_start,
5884        .stop           = task_clock_event_stop,
5885        .read           = task_clock_event_read,
5886
5887        .event_idx      = perf_swevent_event_idx,
5888};
5889
5890static void perf_pmu_nop_void(struct pmu *pmu)
5891{
5892}
5893
5894static int perf_pmu_nop_int(struct pmu *pmu)
5895{
5896        return 0;
5897}
5898
5899static void perf_pmu_start_txn(struct pmu *pmu)
5900{
5901        perf_pmu_disable(pmu);
5902}
5903
5904static int perf_pmu_commit_txn(struct pmu *pmu)
5905{
5906        perf_pmu_enable(pmu);
5907        return 0;
5908}
5909
5910static void perf_pmu_cancel_txn(struct pmu *pmu)
5911{
5912        perf_pmu_enable(pmu);
5913}
5914
5915static int perf_event_idx_default(struct perf_event *event)
5916{
5917        return event->hw.idx + 1;
5918}
5919
5920/*
5921 * Ensures all contexts with the same task_ctx_nr have the same
5922 * pmu_cpu_context too.
5923 */
5924static void *find_pmu_context(int ctxn)
5925{
5926        struct pmu *pmu;
5927
5928        if (ctxn < 0)
5929                return NULL;
5930
5931        list_for_each_entry(pmu, &pmus, entry) {
5932                if (pmu->task_ctx_nr == ctxn)
5933                        return pmu->pmu_cpu_context;
5934        }
5935
5936        return NULL;
5937}
5938
5939static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
5940{
5941        int cpu;
5942
5943        for_each_possible_cpu(cpu) {
5944                struct perf_cpu_context *cpuctx;
5945
5946                cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
5947
5948                if (cpuctx->unique_pmu == old_pmu)
5949                        cpuctx->unique_pmu = pmu;
5950        }
5951}
5952
5953static void free_pmu_context(struct pmu *pmu)
5954{
5955        struct pmu *i;
5956
5957        mutex_lock(&pmus_lock);
5958        /*
5959         * Crude refcount: free the cpu context only if no other pmu shares it.
5960         */
5961        list_for_each_entry(i, &pmus, entry) {
5962                if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
5963                        update_pmu_context(i, pmu);
5964                        goto out;
5965                }
5966        }
5967
5968        free_percpu(pmu->pmu_cpu_context);
5969out:
5970        mutex_unlock(&pmus_lock);
5971}
5972static struct idr pmu_idr;
5973
5974static ssize_t
5975type_show(struct device *dev, struct device_attribute *attr, char *page)
5976{
5977        struct pmu *pmu = dev_get_drvdata(dev);
5978
5979        return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
5980}
5981
5982static struct device_attribute pmu_dev_attrs[] = {
5983       __ATTR_RO(type),
5984       __ATTR_NULL,
5985};
5986
5987static int pmu_bus_running;
5988static struct bus_type pmu_bus = {
5989        .name           = "event_source",
5990        .dev_attrs      = pmu_dev_attrs,
5991};
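
/*
 * The bus above makes each named pmu visible in sysfs; for example (path
 * shown for illustration), a pmu's statically or dynamically assigned type
 * can be read from:
 *
 *      /sys/bus/event_source/devices/<pmu>/type
 *
 * which is what tools use to fill perf_event_attr.type for non-core PMUs.
 */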
5992
5993static void pmu_dev_release(struct device *dev)
5994{
5995        kfree(dev);
5996}
5997
5998static int pmu_dev_alloc(struct pmu *pmu)
5999{
6000        int ret = -ENOMEM;
6001
6002        pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
6003        if (!pmu->dev)
6004                goto out;
6005
6006        pmu->dev->groups = pmu->attr_groups;
6007        device_initialize(pmu->dev);
6008        ret = dev_set_name(pmu->dev, "%s", pmu->name);
6009        if (ret)
6010                goto free_dev;
6011
6012        dev_set_drvdata(pmu->dev, pmu);
6013        pmu->dev->bus = &pmu_bus;
6014        pmu->dev->release = pmu_dev_release;
6015        ret = device_add(pmu->dev);
6016        if (ret)
6017                goto free_dev;
6018
6019out:
6020        return ret;
6021
6022free_dev:
6023        put_device(pmu->dev);
6024        goto out;
6025}
6026
6027static struct lock_class_key cpuctx_mutex;
6028static struct lock_class_key cpuctx_lock;
6029
6030int perf_pmu_register(struct pmu *pmu, char *name, int type)
6031{
6032        int cpu, ret;
6033
6034        mutex_lock(&pmus_lock);
6035        ret = -ENOMEM;
6036        pmu->pmu_disable_count = alloc_percpu(int);
6037        if (!pmu->pmu_disable_count)
6038                goto unlock;
6039
6040        pmu->type = -1;
6041        if (!name)
6042                goto skip_type;
6043        pmu->name = name;
6044
6045        if (type < 0) {
6046                type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL);
6047                if (type < 0) {
6048                        ret = type;
6049                        goto free_pdc;
6050                }
6051        }
6052        pmu->type = type;
6053
6054        if (pmu_bus_running) {
6055                ret = pmu_dev_alloc(pmu);
6056                if (ret)
6057                        goto free_idr;
6058        }
6059
6060skip_type:
6061        pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
6062        if (pmu->pmu_cpu_context)
6063                goto got_cpu_context;
6064
6065        ret = -ENOMEM;
6066        pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
6067        if (!pmu->pmu_cpu_context)
6068                goto free_dev;
6069
6070        for_each_possible_cpu(cpu) {
6071                struct perf_cpu_context *cpuctx;
6072
6073                cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
6074                __perf_event_init_context(&cpuctx->ctx);
6075                lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
6076                lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
6077                cpuctx->ctx.type = cpu_context;
6078                cpuctx->ctx.pmu = pmu;
6079                cpuctx->jiffies_interval = 1;
6080                INIT_LIST_HEAD(&cpuctx->rotation_list);
6081                cpuctx->unique_pmu = pmu;
6082        }
6083
6084got_cpu_context:
6085        if (!pmu->start_txn) {
6086                if (pmu->pmu_enable) {
6087                        /*
6088                         * If we have pmu_enable/pmu_disable calls, install
6089                         * transaction stubs that use them to try to batch
6090                         * hardware accesses.
6091                         */
6092                        pmu->start_txn  = perf_pmu_start_txn;
6093                        pmu->commit_txn = perf_pmu_commit_txn;
6094                        pmu->cancel_txn = perf_pmu_cancel_txn;
6095                } else {
6096                        pmu->start_txn  = perf_pmu_nop_void;
6097                        pmu->commit_txn = perf_pmu_nop_int;
6098                        pmu->cancel_txn = perf_pmu_nop_void;
6099                }
6100        }
6101
6102        if (!pmu->pmu_enable) {
6103                pmu->pmu_enable  = perf_pmu_nop_void;
6104                pmu->pmu_disable = perf_pmu_nop_void;
6105        }
6106
6107        if (!pmu->event_idx)
6108                pmu->event_idx = perf_event_idx_default;
6109
6110        list_add_rcu(&pmu->entry, &pmus);
6111        ret = 0;
6112unlock:
6113        mutex_unlock(&pmus_lock);
6114
6115        return ret;
6116
6117free_dev:
6118        device_del(pmu->dev);
6119        put_device(pmu->dev);
6120
6121free_idr:
6122        if (pmu->type >= PERF_TYPE_MAX)
6123                idr_remove(&pmu_idr, pmu->type);
6124
6125free_pdc:
6126        free_percpu(pmu->pmu_disable_count);
6127        goto unlock;
6128}
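
/*
 * Illustrative sketch (hypothetical pmu, not built): the minimum glue a new
 * PMU supplies before calling perf_pmu_register(). Passing type == -1 asks
 * the code above to allocate a dynamic type from pmu_idr.
 */
#if 0
static int example_event_init(struct perf_event *event)
{
        /* Only claim events of our dynamically allocated type. */
        if (event->attr.type != event->pmu->type)
                return -ENOENT;
        return 0;
}

static int  example_add(struct perf_event *event, int flags)   { return 0; }
static void example_del(struct perf_event *event, int flags)   { }
static void example_start(struct perf_event *event, int flags) { }
static void example_stop(struct perf_event *event, int flags)  { }
static void example_read(struct perf_event *event)             { }

static struct pmu example_pmu = {
        .task_ctx_nr    = perf_invalid_context, /* per-cpu counting only */
        .event_init     = example_event_init,
        .add            = example_add,
        .del            = example_del,
        .start          = example_start,
        .stop           = example_stop,
        .read           = example_read,
};

static int __init example_pmu_register(void)
{
        return perf_pmu_register(&example_pmu, "example", -1);
}
#endif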
6129
6130void perf_pmu_unregister(struct pmu *pmu)
6131{
6132        mutex_lock(&pmus_lock);
6133        list_del_rcu(&pmu->entry);
6134        mutex_unlock(&pmus_lock);
6135
6136        /*
6137         * We dereference the pmu list under both SRCU and regular RCU, so
6138         * synchronize against both of those.
6139         */
6140        synchronize_srcu(&pmus_srcu);
6141        synchronize_rcu();
6142
6143        free_percpu(pmu->pmu_disable_count);
6144        if (pmu->type >= PERF_TYPE_MAX)
6145                idr_remove(&pmu_idr, pmu->type);
6146        device_del(pmu->dev);
6147        put_device(pmu->dev);
6148        free_pmu_context(pmu);
6149}
6150
6151struct pmu *perf_init_event(struct perf_event *event)
6152{
6153        struct pmu *pmu = NULL;
6154        int idx;
6155        int ret;
6156
6157        idx = srcu_read_lock(&pmus_srcu);
6158
6159        rcu_read_lock();
6160        pmu = idr_find(&pmu_idr, event->attr.type);
6161        rcu_read_unlock();
6162        if (pmu) {
6163                event->pmu = pmu;
6164                ret = pmu->event_init(event);
6165                if (ret)
6166                        pmu = ERR_PTR(ret);
6167                goto unlock;
6168        }
6169
6170        list_for_each_entry_rcu(pmu, &pmus, entry) {
6171                event->pmu = pmu;
6172                ret = pmu->event_init(event);
6173                if (!ret)
6174                        goto unlock;
6175
6176                if (ret != -ENOENT) {
6177                        pmu = ERR_PTR(ret);
6178                        goto unlock;
6179                }
6180        }
6181        pmu = ERR_PTR(-ENOENT);
6182unlock:
6183        srcu_read_unlock(&pmus_srcu, idx);
6184
6185        return pmu;
6186}
6187
6188/*
6189 * Allocate and initialize an event structure
6190 */
6191static struct perf_event *
6192perf_event_alloc(struct perf_event_attr *attr, int cpu,
6193                 struct task_struct *task,
6194                 struct perf_event *group_leader,
6195                 struct perf_event *parent_event,
6196                 perf_overflow_handler_t overflow_handler,
6197                 void *context)
6198{
6199        struct pmu *pmu;
6200        struct perf_event *event;
6201        struct hw_perf_event *hwc;
6202        long err;
6203
6204        if ((unsigned)cpu >= nr_cpu_ids) {
6205                if (!task || cpu != -1)
6206                        return ERR_PTR(-EINVAL);
6207        }
6208
6209        event = kzalloc(sizeof(*event), GFP_KERNEL);
6210        if (!event)
6211                return ERR_PTR(-ENOMEM);
6212
6213        /*
6214         * Single events are their own group leaders, with an
6215         * empty sibling list:
6216         */
6217        if (!group_leader)
6218                group_leader = event;
6219
6220        mutex_init(&event->child_mutex);
6221        INIT_LIST_HEAD(&event->child_list);
6222
6223        INIT_LIST_HEAD(&event->group_entry);
6224        INIT_LIST_HEAD(&event->event_entry);
6225        INIT_LIST_HEAD(&event->sibling_list);
6226        INIT_LIST_HEAD(&event->rb_entry);
6227
6228        init_waitqueue_head(&event->waitq);
6229        init_irq_work(&event->pending, perf_pending_event);
6230
6231        mutex_init(&event->mmap_mutex);
6232
6233        atomic_long_set(&event->refcount, 1);
6234        event->cpu              = cpu;
6235        event->attr             = *attr;
6236        event->group_leader     = group_leader;
6237        event->pmu              = NULL;
6238        event->oncpu            = -1;
6239
6240        event->parent           = parent_event;
6241
6242        event->ns               = get_pid_ns(task_active_pid_ns(current));
6243        event->id               = atomic64_inc_return(&perf_event_id);
6244
6245        event->state            = PERF_EVENT_STATE_INACTIVE;
6246
6247        if (task) {
6248                event->attach_state = PERF_ATTACH_TASK;
6249
6250                if (attr->type == PERF_TYPE_TRACEPOINT)
6251                        event->hw.tp_target = task;
6252#ifdef CONFIG_HAVE_HW_BREAKPOINT
6253                /*
6254                 * hw_breakpoint is a bit difficult here..
6255                 */
6256                else if (attr->type == PERF_TYPE_BREAKPOINT)
6257                        event->hw.bp_target = task;
6258#endif
6259        }
6260
6261        if (!overflow_handler && parent_event) {
6262                overflow_handler = parent_event->overflow_handler;
6263                context = parent_event->overflow_handler_context;
6264        }
6265
6266        event->overflow_handler = overflow_handler;
6267        event->overflow_handler_context = context;
6268
6269        perf_event__state_init(event);
6270
6271        pmu = NULL;
6272
6273        hwc = &event->hw;
6274        hwc->sample_period = attr->sample_period;
6275        if (attr->freq && attr->sample_freq)
6276                hwc->sample_period = 1;
6277        hwc->last_period = hwc->sample_period;
6278
6279        local64_set(&hwc->period_left, hwc->sample_period);
6280
6281        /*
6282         * we currently do not support PERF_FORMAT_GROUP on inherited events
6283         */
6284        if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
6285                goto done;
6286
6287        pmu = perf_init_event(event);
6288
6289done:
6290        err = 0;
6291        if (!pmu)
6292                err = -EINVAL;
6293        else if (IS_ERR(pmu))
6294                err = PTR_ERR(pmu);
6295
6296        if (err) {
6297                if (event->ns)
6298                        put_pid_ns(event->ns);
6299                kfree(event);
6300                return ERR_PTR(err);
6301        }
6302
6303        if (!event->parent) {
6304                if (event->attach_state & PERF_ATTACH_TASK)
6305                        static_key_slow_inc(&perf_sched_events.key);
6306                if (event->attr.mmap || event->attr.mmap_data)
6307                        atomic_inc(&nr_mmap_events);
6308                if (event->attr.comm)
6309                        atomic_inc(&nr_comm_events);
6310                if (event->attr.task)
6311                        atomic_inc(&nr_task_events);
6312                if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
6313                        err = get_callchain_buffers();
6314                        if (err) {
6315                                free_event(event);
6316                                return ERR_PTR(err);
6317                        }
6318                }
6319                if (has_branch_stack(event)) {
6320                        static_key_slow_inc(&perf_sched_events.key);
6321                        if (!(event->attach_state & PERF_ATTACH_TASK))
6322                                atomic_inc(&per_cpu(perf_branch_stack_events,
6323                                                    event->cpu));
6324                }
6325        }
6326
6327        return event;
6328}
6329
6330static int perf_copy_attr(struct perf_event_attr __user *uattr,
6331                          struct perf_event_attr *attr)
6332{
6333        u32 size;
6334        int ret;
6335
6336        if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
6337                return -EFAULT;
6338
6339        /*
6340         * zero the full structure, so that a short copy leaves the tail zeroed.
6341         */
6342        memset(attr, 0, sizeof(*attr));
6343
6344        ret = get_user(size, &uattr->size);
6345        if (ret)
6346                return ret;
6347
6348        if (size > PAGE_SIZE)   /* silly large */
6349                goto err_size;
6350
6351        if (!size)              /* abi compat */
6352                size = PERF_ATTR_SIZE_VER0;
6353
6354        if (size < PERF_ATTR_SIZE_VER0)
6355                goto err_size;
6356
6357        /*
6358         * If we're handed a bigger struct than we know of,
6359         * ensure all the unknown bits are 0 - i.e. new
6360         * user-space does not rely on any kernel feature
6361         * extensions we don't know about yet.
6362         */
6363        if (size > sizeof(*attr)) {
6364                unsigned char __user *addr;
6365                unsigned char __user *end;
6366                unsigned char val;
6367
6368                addr = (void __user *)uattr + sizeof(*attr);
6369                end  = (void __user *)uattr + size;
6370
6371                for (; addr < end; addr++) {
6372                        ret = get_user(val, addr);
6373                        if (ret)
6374                                return ret;
6375                        if (val)
6376                                goto err_size;
6377                }
6378                size = sizeof(*attr);
6379        }
6380
6381        ret = copy_from_user(attr, uattr, size);
6382        if (ret)
6383                return -EFAULT;
6384
6385        if (attr->__reserved_1)
6386                return -EINVAL;
6387
6388        if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
6389                return -EINVAL;
6390
6391        if (attr->read_format & ~(PERF_FORMAT_MAX-1))
6392                return -EINVAL;
6393
6394        if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) {
6395                u64 mask = attr->branch_sample_type;
6396
6397                /* only using defined bits */
6398                if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1))
6399                        return -EINVAL;
6400
6401                /* at least one branch bit must be set */
6402                if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL))
6403                        return -EINVAL;
6404
6405                /* kernel level capture: check permissions */
6406                if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM)
6407                    && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
6408                        return -EACCES;
6409
6410                /* propagate priv level, when not set for branch */
6411                if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) {
6412
6413                        /* exclude_kernel checked on syscall entry */
6414                        if (!attr->exclude_kernel)
6415                                mask |= PERF_SAMPLE_BRANCH_KERNEL;
6416
6417                        if (!attr->exclude_user)
6418                                mask |= PERF_SAMPLE_BRANCH_USER;
6419
6420                        if (!attr->exclude_hv)
6421                                mask |= PERF_SAMPLE_BRANCH_HV;
6422                        /*
6423                         * adjust user setting (for HW filter setup)
6424                         */
6425                        attr->branch_sample_type = mask;
6426                }
6427        }
6428
6429        if (attr->sample_type & PERF_SAMPLE_REGS_USER) {
6430                ret = perf_reg_validate(attr->sample_regs_user);
6431                if (ret)
6432                        return ret;
6433        }
6434
6435        if (attr->sample_type & PERF_SAMPLE_STACK_USER) {
6436                if (!arch_perf_have_user_stack_dump())
6437                        return -ENOSYS;
6438
6439                /*
6440                 * We have __u32 type for the size, but so far
6441                 * we can only use __u16 as maximum due to the
6442                 * __u16 sample size limit.
6443                 */
6444                if (attr->sample_stack_user >= USHRT_MAX)
6445                        ret = -EINVAL;
6446                else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
6447                        ret = -EINVAL;
6448        }
6449
6450out:
6451        return ret;
6452
6453err_size:
6454        put_user(sizeof(*attr), &uattr->size);
6455        ret = -E2BIG;
6456        goto out;
6457}
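
/*
 * Illustrative behaviour of the size handshake above (sizes are examples):
 * an old binary passing size == PERF_ATTR_SIZE_VER0 gets the newer fields
 * left zeroed by the memset(); a newer binary passing a size larger than
 * sizeof(*attr) is accepted only if every extra byte is zero, otherwise it
 * gets -E2BIG with the kernel's size written back through uattr->size.
 */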
6458
6459static int
6460perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
6461{
6462        struct ring_buffer *rb = NULL, *old_rb = NULL;
6463        int ret = -EINVAL;
6464
6465        if (!output_event)
6466                goto set;
6467
6468        /* don't allow circular references */
6469        if (event == output_event)
6470                goto out;
6471
6472        /*
6473         * Don't allow cross-cpu buffers
6474         */
6475        if (output_event->cpu != event->cpu)
6476                goto out;
6477
6478        /*
6479         * If it's not a per-cpu rb, it must be the same task.
6480         */
6481        if (output_event->cpu == -1 && output_event->ctx != event->ctx)
6482                goto out;
6483
6484set:
6485        mutex_lock(&event->mmap_mutex);
6486        /* Can't redirect output if we've got an active mmap() */
6487        if (atomic_read(&event->mmap_count))
6488                goto unlock;
6489
6490        old_rb = event->rb;
6491
6492        if (output_event) {
6493                /* get the rb we want to redirect to */
6494                rb = ring_buffer_get(output_event);
6495                if (!rb)
6496                        goto unlock;
6497        }
6498
6499        if (old_rb)
6500                ring_buffer_detach(event, old_rb);
6501
6502        if (rb)
6503                ring_buffer_attach(event, rb);
6504
6505        rcu_assign_pointer(event->rb, rb);
6506
6507        if (old_rb) {
6508                ring_buffer_put(old_rb);
6509                /*
6510                 * Since we detached the old rb before attaching the new
6511                 * one, we could have missed a wakeup.
6512                 * Provide it now.
6513                 */
6514                wake_up_all(&event->waitq);
6515        }
6516
6517        ret = 0;
6518unlock:
6519        mutex_unlock(&event->mmap_mutex);
6520
6521out:
6522        return ret;
6523}
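
/*
 * Illustrative note: this path is reached either with PERF_FLAG_FD_OUTPUT
 * at sys_perf_event_open() time or later via the PERF_EVENT_IOC_SET_OUTPUT
 * ioctl, e.g. (userspace-side sketch, fds are placeholders):
 *
 *      ioctl(fd_b, PERF_EVENT_IOC_SET_OUTPUT, fd_a);
 *
 * so that both events share fd_a's ring buffer and a single mmap().
 */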
6524
6525/**
6526 * sys_perf_event_open - open a performance event, associate it to a task/cpu
6527 *
6528 * @attr_uptr:  event_id type attributes for monitoring/sampling
6529 * @pid:                target pid
6530 * @cpu:                target cpu
6531 * @group_fd:           group leader event fd
6532 */
6533SYSCALL_DEFINE5(perf_event_open,
6534                struct perf_event_attr __user *, attr_uptr,
6535                pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
6536{
6537        struct perf_event *group_leader = NULL, *output_event = NULL;
6538        struct perf_event *event, *sibling;
6539        struct perf_event_attr attr;
6540        struct perf_event_context *ctx;
6541        struct file *event_file = NULL;
6542        struct fd group = {NULL, 0};
6543        struct task_struct *task = NULL;
6544        struct pmu *pmu;
6545        int event_fd;
6546        int move_group = 0;
6547        int err;
6548
6549        /* for future expandability... */
6550        if (flags & ~PERF_FLAG_ALL)
6551                return -EINVAL;
6552
6553        err = perf_copy_attr(attr_uptr, &attr);
6554        if (err)
6555                return err;
6556
6557        if (!attr.exclude_kernel) {
6558                if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
6559                        return -EACCES;
6560        }
6561
6562        if (attr.freq) {
6563                if (attr.sample_freq > sysctl_perf_event_sample_rate)
6564                        return -EINVAL;
6565        }
6566
6567        /*
6568         * In cgroup mode, the pid argument is used to pass the fd
6569         * opened to the cgroup directory in cgroupfs. The cpu argument
6570         * designates the cpu on which to monitor threads from that
6571         * cgroup.
6572         */
6573        if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
6574                return -EINVAL;
6575
6576        event_fd = get_unused_fd();
6577        if (event_fd < 0)
6578                return event_fd;
6579
6580        if (group_fd != -1) {
6581                err = perf_fget_light(group_fd, &group);
6582                if (err)
6583                        goto err_fd;
6584                group_leader = group.file->private_data;
6585                if (flags & PERF_FLAG_FD_OUTPUT)
6586                        output_event = group_leader;
6587                if (flags & PERF_FLAG_FD_NO_GROUP)
6588                        group_leader = NULL;
6589        }
6590
6591        if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
6592                task = find_lively_task_by_vpid(pid);
6593                if (IS_ERR(task)) {
6594                        err = PTR_ERR(task);
6595                        goto err_group_fd;
6596                }
6597        }
6598
6599        get_online_cpus();
6600
6601        event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
6602                                 NULL, NULL);
6603        if (IS_ERR(event)) {
6604                err = PTR_ERR(event);
6605                goto err_task;
6606        }
6607
6608        if (flags & PERF_FLAG_PID_CGROUP) {
6609                err = perf_cgroup_connect(pid, event, &attr, group_leader);
6610                if (err)
6611                        goto err_alloc;
6612                /*
6613                 * one more event:
6614                 * - that has cgroup constraint on event->cpu
6615                 * - that may need work on context switch
6616                 */
6617                atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
6618                static_key_slow_inc(&perf_sched_events.key);
6619        }
6620
6621        /*
6622         * Special case software events and allow them to be part of
6623         * any hardware group.
6624         */
6625        pmu = event->pmu;
6626
6627        if (group_leader &&
6628            (is_software_event(event) != is_software_event(group_leader))) {
6629                if (is_software_event(event)) {
6630                        /*
6631                         * We only get here when event and group_leader differ;
6632                         * event is a software event, so the group leader is not.
6633                         *
6634                         * Allow the addition of software events to !software
6635                         * groups; this is safe because software events never
6636                         * fail to schedule.
6637                         */
6638                        pmu = group_leader->pmu;
6639                } else if (is_software_event(group_leader) &&
6640                           (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
6641                        /*
6642                         * In case the group is a pure software group, and we
6643                         * try to add a hardware event, move the whole group to
6644                         * the hardware context.
6645                         */
6646                        move_group = 1;
6647                }
6648        }
6649
6650        /*
6651         * Get the target context (task or percpu):
6652         */
6653        ctx = find_get_context(pmu, task, event->cpu);
6654        if (IS_ERR(ctx)) {
6655                err = PTR_ERR(ctx);
6656                goto err_alloc;
6657        }
6658
6659        if (task) {
6660                put_task_struct(task);
6661                task = NULL;
6662        }
6663
6664        /*
6665         * Look up the group leader (we will attach this event to it):
6666         */
6667        if (group_leader) {
6668                err = -EINVAL;
6669
6670                /*
6671                 * Do not allow a recursive hierarchy (this new sibling
6672                 * becoming part of another group-sibling):
6673                 */
6674                if (group_leader->group_leader != group_leader)
6675                        goto err_context;
6676                /*
6677                 * Do not allow attaching to a group in a different
6678                 * task or CPU context:
6679                 */
6680                if (move_group) {
6681                        if (group_leader->ctx->type != ctx->type)
6682                                goto err_context;
6683                } else {
6684                        if (group_leader->ctx != ctx)
6685                                goto err_context;
6686                }
6687
6688                /*
6689                 * Only a group leader can be exclusive or pinned
6690                 */
6691                if (attr.exclusive || attr.pinned)
6692                        goto err_context;
6693        }
6694
6695        if (output_event) {
6696                err = perf_event_set_output(event, output_event);
6697                if (err)
6698                        goto err_context;
6699        }
6700
6701        event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
6702        if (IS_ERR(event_file)) {
6703                err = PTR_ERR(event_file);
6704                goto err_context;
6705        }
6706
6707        if (move_group) {
6708                struct perf_event_context *gctx = group_leader->ctx;
6709
6710                mutex_lock(&gctx->mutex);
6711                perf_remove_from_context(group_leader);
6712
6713                /*
6714                 * Removing from the context leaves the event disabled.
6715                 * What we want here is an event in its initial startup
6716                 * state, ready to be added into the new context.
6717                 */
6718                perf_event__state_init(group_leader);
6719                list_for_each_entry(sibling, &group_leader->sibling_list,
6720                                    group_entry) {
6721                        perf_remove_from_context(sibling);
6722                        perf_event__state_init(sibling);
6723                        put_ctx(gctx);
6724                }
6725                mutex_unlock(&gctx->mutex);
6726                put_ctx(gctx);
6727        }
6728
6729        WARN_ON_ONCE(ctx->parent_ctx);
6730        mutex_lock(&ctx->mutex);
6731
6732        if (move_group) {
6733                synchronize_rcu();
6734                perf_install_in_context(ctx, group_leader, event->cpu);
6735                get_ctx(ctx);
6736                list_for_each_entry(sibling, &group_leader->sibling_list,
6737                                    group_entry) {
6738                        perf_install_in_context(ctx, sibling, event->cpu);
6739                        get_ctx(ctx);
6740                }
6741        }
6742
6743        perf_install_in_context(ctx, event, event->cpu);
6744        ++ctx->generation;
6745        perf_unpin_context(ctx);
6746        mutex_unlock(&ctx->mutex);
6747
6748        put_online_cpus();
6749
6750        event->owner = current;
6751
6752        mutex_lock(&current->perf_event_mutex);
6753        list_add_tail(&event->owner_entry, &current->perf_event_list);
6754        mutex_unlock(&current->perf_event_mutex);
6755
6756        /*
6757         * Precalculate sample_data sizes
6758         */
6759        perf_event__header_size(event);
6760        perf_event__id_header_size(event);
6761
6762        /*
6763         * Drop the reference on the group leader fd after placing the
6764         * new event on the sibling_list. This ensures destruction
6765         * of the group leader will find the pointer to itself in
6766         * perf_group_detach().
6767         */
6768        fdput(group);
6769        fd_install(event_fd, event_file);
6770        return event_fd;
6771
6772err_context:
6773        perf_unpin_context(ctx);
6774        put_ctx(ctx);
6775err_alloc:
6776        free_event(event);
6777err_task:
6778        put_online_cpus();
6779        if (task)
6780                put_task_struct(task);
6781err_group_fd:
6782        fdput(group);
6783err_fd:
6784        put_unused_fd(event_fd);
6785        return err;
6786}
6787
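/*
 * Example (userspace view): there is no glibc wrapper for this syscall, so
 * it is normally invoked through syscall(2).  A minimal sketch that counts
 * cpu cycles of the calling thread; the attribute values are one common
 * choice, not the only valid one:
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.size		= sizeof(attr),
 *		.config		= PERF_COUNT_HW_CPU_CYCLES,
 *		.disabled	= 1,
 *		.exclude_kernel	= 1,
 *	};
 *	unsigned long long count;
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *	... run the workload to be measured ...
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *	read(fd, &count, sizeof(count));
 *
 * pid == 0 with cpu == -1 means "this task on any cpu"; group_fd == -1
 * starts a new group with this event as its leader, and flags == 0 uses
 * none of the PERF_FLAG_* modifiers handled above.
 */
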
6788/**
6789 * perf_event_create_kernel_counter
6790 *
6791 * @attr: attributes of the counter to create
6792 * @cpu: cpu to which the counter is bound
6793 * @task: task to profile (NULL for percpu)
     * @overflow_handler: callback invoked on counter overflow (may be NULL)
     * @context: context pointer passed to @overflow_handler
6794 */
6795struct perf_event *
6796perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
6797                                 struct task_struct *task,
6798                                 perf_overflow_handler_t overflow_handler,
6799                                 void *context)
6800{
6801        struct perf_event_context *ctx;
6802        struct perf_event *event;
6803        int err;
6804
6805        /*
6806         * Get the target context (task or percpu):
6807         */
6808
6809        event = perf_event_alloc(attr, cpu, task, NULL, NULL,
6810                                 overflow_handler, context);
6811        if (IS_ERR(event)) {
6812                err = PTR_ERR(event);
6813                goto err;
6814        }
6815
6816        ctx = find_get_context(event->pmu, task, cpu);
6817        if (IS_ERR(ctx)) {
6818                err = PTR_ERR(ctx);
6819                goto err_free;
6820        }
6821
6822        WARN_ON_ONCE(ctx->parent_ctx);
6823        mutex_lock(&ctx->mutex);
6824        perf_install_in_context(ctx, event, cpu);
6825        ++ctx->generation;
6826        perf_unpin_context(ctx);
6827        mutex_unlock(&ctx->mutex);
6828
6829        return event;
6830
6831err_free:
6832        free_event(event);
6833err:
6834        return ERR_PTR(err);
6835}
6836EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
6837
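/*
 * Example (in-kernel view): a minimal sketch of how a kernel user such as
 * the hardlockup watchdog sets up a cpu-bound counter with an overflow
 * callback; my_overflow_handler is a hypothetical function matching
 * perf_overflow_handler_t:
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.size		= sizeof(attr),
 *		.config		= PERF_COUNT_HW_CPU_CYCLES,
 *		.sample_period	= 1000000,
 *	};
 *	struct perf_event *event;
 *
 *	event = perf_event_create_kernel_counter(&attr, cpu, NULL,
 *						 my_overflow_handler, NULL);
 *	if (IS_ERR(event))
 *		return PTR_ERR(event);
 *
 * Passing a task instead of NULL profiles that task rather than the cpu;
 * the counter is torn down again with perf_event_release_kernel().
 */
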
6838void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
6839{
6840        struct perf_event_context *src_ctx;
6841        struct perf_event_context *dst_ctx;
6842        struct perf_event *event, *tmp;
6843        LIST_HEAD(events);
6844
6845        src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
6846        dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
6847
6848        mutex_lock(&src_ctx->mutex);
6849        list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
6850                                 event_entry) {
6851                perf_remove_from_context(event);
6852                put_ctx(src_ctx);
6853                list_add(&event->event_entry, &events);
6854        }
6855        mutex_unlock(&src_ctx->mutex);
6856
6857        synchronize_rcu();
6858
6859        mutex_lock(&dst_ctx->mutex);
6860        list_for_each_entry_safe(event, tmp, &events, event_entry) {
6861                list_del(&event->event_entry);
6862                if (event->state >= PERF_EVENT_STATE_OFF)
6863                        event->state = PERF_EVENT_STATE_INACTIVE;
6864                perf_install_in_context(dst_ctx, event, dst_cpu);
6865                get_ctx(dst_ctx);
6866        }
6867        mutex_unlock(&dst_ctx->mutex);
6868}
6869EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
6870
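/*
 * Example: a minimal sketch of how a PMU driver's cpu-hotplug callback
 * might use this to keep its cpu-bound events alive when a cpu goes
 * offline; the callback and the target-cpu selection are hypothetical:
 *
 *	static void my_pmu_cpu_offline(struct pmu *pmu, int dying_cpu)
 *	{
 *		unsigned int target;
 *
 *		target = cpumask_any_but(cpu_online_mask, dying_cpu);
 *		if (target < nr_cpu_ids)
 *			perf_pmu_migrate_context(pmu, dying_cpu, target);
 *	}
 */
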
6871static void sync_child_event(struct perf_event *child_event,
6872                               struct task_struct *child)
6873{
6874        struct perf_event *parent_event = child_event->parent;
6875        u64 child_val;
6876
6877        if (child_event->attr.inherit_stat)
6878                perf_event_read_event(child_event, child);
6879
6880        child_val = perf_event_count(child_event);
6881
6882        /*
6883         * Add back the child's count to the parent's count:
6884         */
6885        atomic64_add(child_val, &parent_event->child_count);
6886        atomic64_add(child_event->total_time_enabled,
6887                     &parent_event->child_total_time_enabled);
6888        atomic64_add(child_event->total_time_running,
6889                     &parent_event->child_total_time_running);
6890
6891        /*
6892         * Remove this event from the parent's list
6893         */
6894        WARN_ON_ONCE(parent_event->ctx->parent_ctx);
6895        mutex_lock(&parent_event->child_mutex);
6896        list_del_init(&child_event->child_list);
6897        mutex_unlock(&parent_event->child_mutex);
6898
6899        /*
6900         * Release the parent event, if this was the last
6901         * reference to it.
6902         */
6903        put_event(parent_event);
6904}
6905
6906static void
6907__perf_event_exit_task(struct perf_event *child_event,
6908                         struct perf_event_context *child_ctx,
6909                         struct task_struct *child)
6910{
6911        if (child_event->parent) {
6912                raw_spin_lock_irq(&child_ctx->lock);
6913                perf_group_detach(child_event);
6914                raw_spin_unlock_irq(&child_ctx->lock);
6915        }
6916
6917        perf_remove_from_context(child_event);
6918
6919        /*
6920         * It can happen that the parent exits first, and has events
6921         * that are still around due to the child reference. These
6922         * events need to be zapped.
6923         */
6924        if (child_event->parent) {
6925                sync_child_event(child_event, child);
6926                free_event(child_event);
6927        }
6928}
6929
6930static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
6931{
6932        struct perf_event *child_event, *tmp;
6933        struct perf_event_context *child_ctx;
6934        unsigned long flags;
6935
6936        if (likely(!child->perf_event_ctxp[ctxn])) {
6937                perf_event_task(child, NULL, 0);
6938                return;
6939        }
6940
6941        local_irq_save(flags);
6942        /*
6943         * We can't reschedule here because interrupts are disabled,
6944         * and either child is current or it is a task that can't be
6945         * scheduled, so we are now safe from a reschedule changing
6946         * our context.
6947         */
6948        child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);
6949
6950        /*
6951         * Take the context lock here so that if find_get_context is
6952         * reading child->perf_event_ctxp, we wait until it has
6953         * incremented the context's refcount before we do put_ctx below.
6954         */
6955        raw_spin_lock(&child_ctx->lock);
6956        task_ctx_sched_out(child_ctx);
6957        child->perf_event_ctxp[ctxn] = NULL;
6958        /*
6959         * If this context is a clone, unclone it so it can't get
6960         * swapped to another process while we're removing all
6961         * the events from it.
6962         */
6963        unclone_ctx(child_ctx);
6964        update_context_time(child_ctx);
6965        raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
6966
6967        /*
6968         * Report the task dead after unscheduling the events so that we
6969         * won't get any samples after PERF_RECORD_EXIT. We can however still
6970         * get a few PERF_RECORD_READ events.
6971         */
6972        perf_event_task(child, child_ctx, 0);
6973
6974        /*
6975         * We can recurse on the same lock type through:
6976         *
6977         *   __perf_event_exit_task()
6978         *     sync_child_event()
6979         *       put_event()
6980         *         mutex_lock(&ctx->mutex)
6981         *
6982         * But since it's the parent context it won't be the same instance.
6983         */
6984        mutex_lock(&child_ctx->mutex);
6985
6986again:
6987        list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
6988                                 group_entry)
6989                __perf_event_exit_task(child_event, child_ctx, child);
6990
6991        list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
6992                                 group_entry)
6993                __perf_event_exit_task(child_event, child_ctx, child);
6994
6995        /*
6996         * If the last event was a group event, it will have appended all
6997         * its siblings to the list, but 'tmp' was obtained before that and
6998         * still points to the list head terminating the iteration.
6999         */
7000        if (!list_empty(&child_ctx->pinned_groups) ||
7001            !list_empty(&child_ctx->flexible_groups))
7002                goto again;
7003
7004        mutex_unlock(&child_ctx->mutex);
7005
7006        put_ctx(child_ctx);
7007}
7008
7009/*
7010 * When a child task exits, feed back event values to parent events.
7011 */
7012void perf_event_exit_task(struct task_struct *child)
7013{
7014        struct perf_event *event, *tmp;
7015        int ctxn;
7016
7017        mutex_lock(&child->perf_event_mutex);
7018        list_for_each_entry_safe(event, tmp, &child->perf_event_list,
7019                                 owner_entry) {
7020                list_del_init(&event->owner_entry);
7021
7022                /*
7023                 * Ensure the list deletion is visible before we clear
7024                 * the owner; this closes a race against perf_release(),
7025                 * where we need to serialize on the owner->perf_event_mutex.
7026                 */
7027                smp_wmb();
7028                event->owner = NULL;
7029        }
7030        mutex_unlock(&child->perf_event_mutex);
7031
7032        for_each_task_context_nr(ctxn)
7033                perf_event_exit_task_context(child, ctxn);
7034}
7035
7036static void perf_free_event(struct perf_event *event,
7037                            struct perf_event_context *ctx)
7038{
7039        struct perf_event *parent = event->parent;
7040
7041        if (WARN_ON_ONCE(!parent))
7042                return;
7043
7044        mutex_lock(&parent->child_mutex);
7045        list_del_init(&event->child_list);
7046        mutex_unlock(&parent->child_mutex);
7047
7048        put_event(parent);
7049
7050        perf_group_detach(event);
7051        list_del_event(event, ctx);
7052        free_event(event);
7053}
7054
7055/*
7056 * free an unexposed, unused context, as created by inheritance in
7057 * perf_event_init_task() below; used by fork() on failure.
7058 */
7059void perf_event_free_task(struct task_struct *task)
7060{
7061        struct perf_event_context *ctx;
7062        struct perf_event *event, *tmp;
7063        int ctxn;
7064
7065        for_each_task_context_nr(ctxn) {
7066                ctx = task->perf_event_ctxp[ctxn];
7067                if (!ctx)
7068                        continue;
7069
7070                mutex_lock(&ctx->mutex);
7071again:
7072                list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
7073                                group_entry)
7074                        perf_free_event(event, ctx);
7075
7076                list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
7077                                group_entry)
7078                        perf_free_event(event, ctx);
7079
7080                if (!list_empty(&ctx->pinned_groups) ||
7081                                !list_empty(&ctx->flexible_groups))
7082                        goto again;
7083
7084                mutex_unlock(&ctx->mutex);
7085
7086                put_ctx(ctx);
7087        }
7088}
7089
7090void perf_event_delayed_put(struct task_struct *task)
7091{
7092        int ctxn;
7093
7094        for_each_task_context_nr(ctxn)
7095                WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
7096}
7097
7098/*
7099 * inherit an event from parent task to child task:
7100 */
7101static struct perf_event *
7102inherit_event(struct perf_event *parent_event,
7103              struct task_struct *parent,
7104              struct perf_event_context *parent_ctx,
7105              struct task_struct *child,
7106              struct perf_event *group_leader,
7107              struct perf_event_context *child_ctx)
7108{
7109        struct perf_event *child_event;
7110        unsigned long flags;
7111
7112        /*
7113         * Instead of creating recursive hierarchies of events,
7114         * we link inherited events back to the original parent,
7115         * which has a filp for sure, which we use as the reference
7116         * which is guaranteed to have a filp that we use as the
7117         * reference count:
7118        if (parent_event->parent)
7119                parent_event = parent_event->parent;
7120
7121        child_event = perf_event_alloc(&parent_event->attr,
7122                                           parent_event->cpu,
7123                                           child,
7124                                           group_leader, parent_event,
7125                                           NULL, NULL);
7126        if (IS_ERR(child_event))
7127                return child_event;
7128
7129        if (!atomic_long_inc_not_zero(&parent_event->refcount)) {
7130                free_event(child_event);
7131                return NULL;
7132        }
7133
7134        get_ctx(child_ctx);
7135
7136        /*
7137         * Make the child state follow the state of the parent event,
7138         * not its attr.disabled bit.  We hold the parent's mutex,
7139         * so we won't race with perf_event_{en, dis}able_family.
7140         */
7141        if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
7142                child_event->state = PERF_EVENT_STATE_INACTIVE;
7143        else
7144                child_event->state = PERF_EVENT_STATE_OFF;
7145
7146        if (parent_event->attr.freq) {
7147                u64 sample_period = parent_event->hw.sample_period;
7148                struct hw_perf_event *hwc = &child_event->hw;
7149
7150                hwc->sample_period = sample_period;
7151                hwc->last_period   = sample_period;
7152
7153                local64_set(&hwc->period_left, sample_period);
7154        }
7155
7156        child_event->ctx = child_ctx;
7157        child_event->overflow_handler = parent_event->overflow_handler;
7158        child_event->overflow_handler_context
7159                = parent_event->overflow_handler_context;
7160
7161        /*
7162         * Precalculate sample_data sizes
7163         */
7164        perf_event__header_size(child_event);
7165        perf_event__id_header_size(child_event);
7166
7167        /*
7168         * Link it up in the child's context:
7169         */
7170        raw_spin_lock_irqsave(&child_ctx->lock, flags);
7171        add_event_to_ctx(child_event, child_ctx);
7172        raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
7173
7174        /*
7175         * Link this into the parent event's child list
7176         */
7177        WARN_ON_ONCE(parent_event->ctx->parent_ctx);
7178        mutex_lock(&parent_event->child_mutex);
7179        list_add_tail(&child_event->child_list, &parent_event->child_list);
7180        mutex_unlock(&parent_event->child_mutex);
7181
7182        return child_event;
7183}
7184
7185static int inherit_group(struct perf_event *parent_event,
7186              struct task_struct *parent,
7187              struct perf_event_context *parent_ctx,
7188              struct task_struct *child,
7189              struct perf_event_context *child_ctx)
7190{
7191        struct perf_event *leader;
7192        struct perf_event *sub;
7193        struct perf_event *child_ctr;
7194
7195        leader = inherit_event(parent_event, parent, parent_ctx,
7196                                 child, NULL, child_ctx);
7197        if (IS_ERR(leader))
7198                return PTR_ERR(leader);
7199        list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
7200                child_ctr = inherit_event(sub, parent, parent_ctx,
7201                                            child, leader, child_ctx);
7202                if (IS_ERR(child_ctr))
7203                        return PTR_ERR(child_ctr);
7204        }
7205        return 0;
7206}
7207
7208static int
7209inherit_task_group(struct perf_event *event, struct task_struct *parent,
7210                   struct perf_event_context *parent_ctx,
7211                   struct task_struct *child, int ctxn,
7212                   int *inherited_all)
7213{
7214        int ret;
7215        struct perf_event_context *child_ctx;
7216
7217        if (!event->attr.inherit) {
7218                *inherited_all = 0;
7219                return 0;
7220        }
7221
7222        child_ctx = child->perf_event_ctxp[ctxn];
7223        if (!child_ctx) {
7224                /*
7225                 * This is executed from the parent task context, so
7226                 * inherit events that have been marked for cloning.
7227                 * First allocate and initialize a context for the
7228                 * child.
7229                 */
7230
7231                child_ctx = alloc_perf_context(event->pmu, child);
7232                if (!child_ctx)
7233                        return -ENOMEM;
7234
7235                child->perf_event_ctxp[ctxn] = child_ctx;
7236        }
7237
7238        ret = inherit_group(event, parent, parent_ctx,
7239                            child, child_ctx);
7240
7241        if (ret)
7242                *inherited_all = 0;
7243
7244        return ret;
7245}
7246
7247/*
7248 * Initialize the perf_event context in task_struct
7249 */
7250int perf_event_init_context(struct task_struct *child, int ctxn)
7251{
7252        struct perf_event_context *child_ctx, *parent_ctx;
7253        struct perf_event_context *cloned_ctx;
7254        struct perf_event *event;
7255        struct task_struct *parent = current;
7256        int inherited_all = 1;
7257        unsigned long flags;
7258        int ret = 0;
7259
7260        if (likely(!parent->perf_event_ctxp[ctxn]))
7261                return 0;
7262
7263        /*
7264         * If the parent's context is a clone, pin it so it won't get
7265         * swapped under us.
7266         */
7267        parent_ctx = perf_pin_task_context(parent, ctxn);
7268
7269        /*
7270         * No need to check if parent_ctx != NULL here; since we saw
7271         * it non-NULL earlier, the only reason for it to become NULL
7272         * is if we exit, and since we're currently in the middle of
7273         * a fork we can't be exiting at the same time.
7274         */
7275
7276        /*
7277         * Lock the parent list. No need to lock the child - not PID
7278         * hashed yet and not running, so nobody can access it.
7279         */
7280        mutex_lock(&parent_ctx->mutex);
7281
7282        /*
7283         * We don't have to disable NMIs - we are only looking at
7284         * the list, not manipulating it:
7285         */
7286        list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
7287                ret = inherit_task_group(event, parent, parent_ctx,
7288                                         child, ctxn, &inherited_all);
7289                if (ret)
7290                        break;
7291        }
7292
7293        /*
7294         * We can't hold ctx->lock when iterating the ->flexible_groups list due
7295         * to allocations, but we need to prevent rotation because
7296         * rotate_ctx() will change the list from interrupt context.
7297         */
7298        raw_spin_lock_irqsave(&parent_ctx->lock, flags);
7299        parent_ctx->rotate_disable = 1;
7300        raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
7301
7302        list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
7303                ret = inherit_task_group(event, parent, parent_ctx,
7304                                         child, ctxn, &inherited_all);
7305                if (ret)
7306                        break;
7307        }
7308
7309        raw_spin_lock_irqsave(&parent_ctx->lock, flags);
7310        parent_ctx->rotate_disable = 0;
7311
7312        child_ctx = child->perf_event_ctxp[ctxn];
7313
7314        if (child_ctx && inherited_all) {
7315                /*
7316                 * Mark the child context as a clone of the parent
7317                 * context, or of whatever the parent is a clone of.
7318                 *
7319                 * Note that if the parent is a clone, holding
7320                 * parent_ctx->lock prevents it from being uncloned.
7321                 */
7322                cloned_ctx = parent_ctx->parent_ctx;
7323                if (cloned_ctx) {
7324                        child_ctx->parent_ctx = cloned_ctx;
7325                        child_ctx->parent_gen = parent_ctx->parent_gen;
7326                } else {
7327                        child_ctx->parent_ctx = parent_ctx;
7328                        child_ctx->parent_gen = parent_ctx->generation;
7329                }
7330                get_ctx(child_ctx->parent_ctx);
7331        }
7332
7333        raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
7334        mutex_unlock(&parent_ctx->mutex);
7335
7336        perf_unpin_context(parent_ctx);
7337        put_ctx(parent_ctx);
7338
7339        return ret;
7340}
7341
7342/*
7343 * Initialize the perf_event context in task_struct
7344 */
7345int perf_event_init_task(struct task_struct *child)
7346{
7347        int ctxn, ret;
7348
7349        memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
7350        mutex_init(&child->perf_event_mutex);
7351        INIT_LIST_HEAD(&child->perf_event_list);
7352
7353        for_each_task_context_nr(ctxn) {
7354                ret = perf_event_init_context(child, ctxn);
7355                if (ret)
7356                        return ret;
7357        }
7358
7359        return 0;
7360}
7361
7362static void __init perf_event_init_all_cpus(void)
7363{
7364        struct swevent_htable *swhash;
7365        int cpu;
7366
7367        for_each_possible_cpu(cpu) {
7368                swhash = &per_cpu(swevent_htable, cpu);
7369                mutex_init(&swhash->hlist_mutex);
7370                INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
7371        }
7372}
7373
7374static void __cpuinit perf_event_init_cpu(int cpu)
7375{
7376        struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
7377
7378        mutex_lock(&swhash->hlist_mutex);
7379        if (swhash->hlist_refcount > 0) {
7380                struct swevent_hlist *hlist;
7381
7382                hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
7383                WARN_ON(!hlist);
7384                rcu_assign_pointer(swhash->swevent_hlist, hlist);
7385        }
7386        mutex_unlock(&swhash->hlist_mutex);
7387}
7388
7389#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
7390static void perf_pmu_rotate_stop(struct pmu *pmu)
7391{
7392        struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
7393
7394        WARN_ON(!irqs_disabled());
7395
7396        list_del_init(&cpuctx->rotation_list);
7397}
7398
7399static void __perf_event_exit_context(void *__info)
7400{
7401        struct perf_event_context *ctx = __info;
7402        struct perf_event *event, *tmp;
7403
7404        perf_pmu_rotate_stop(ctx->pmu);
7405
7406        list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
7407                __perf_remove_from_context(event);
7408        list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
7409                __perf_remove_from_context(event);
7410}
7411
7412static void perf_event_exit_cpu_context(int cpu)
7413{
7414        struct perf_event_context *ctx;
7415        struct pmu *pmu;
7416        int idx;
7417
7418        idx = srcu_read_lock(&pmus_srcu);
7419        list_for_each_entry_rcu(pmu, &pmus, entry) {
7420                ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
7421
7422                mutex_lock(&ctx->mutex);
7423                smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
7424                mutex_unlock(&ctx->mutex);
7425        }
7426        srcu_read_unlock(&pmus_srcu, idx);
7427}
7428
7429static void perf_event_exit_cpu(int cpu)
7430{
7431        struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
7432
7433        mutex_lock(&swhash->hlist_mutex);
7434        swevent_hlist_release(swhash);
7435        mutex_unlock(&swhash->hlist_mutex);
7436
7437        perf_event_exit_cpu_context(cpu);
7438}
7439#else
7440static inline void perf_event_exit_cpu(int cpu) { }
7441#endif
7442
7443static int
7444perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
7445{
7446        int cpu;
7447
7448        for_each_online_cpu(cpu)
7449                perf_event_exit_cpu(cpu);
7450
7451        return NOTIFY_OK;
7452}
7453
7454/*
7455 * Run the perf reboot notifier at the very last possible moment so that
7456 * the generic watchdog code runs as long as possible.
7457 */
7458static struct notifier_block perf_reboot_notifier = {
7459        .notifier_call = perf_reboot,
7460        .priority = INT_MIN,
7461};
7462
7463static int __cpuinit
7464perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
7465{
7466        unsigned int cpu = (long)hcpu;
7467
7468        switch (action & ~CPU_TASKS_FROZEN) {
7469
7470        case CPU_UP_PREPARE:
7471        case CPU_DOWN_FAILED:
7472                perf_event_init_cpu(cpu);
7473                break;
7474
7475        case CPU_UP_CANCELED:
7476        case CPU_DOWN_PREPARE:
7477                perf_event_exit_cpu(cpu);
7478                break;
7479
7480        default:
7481                break;
7482        }
7483
7484        return NOTIFY_OK;
7485}
7486
7487void __init perf_event_init(void)
7488{
7489        int ret;
7490
7491        idr_init(&pmu_idr);
7492
7493        perf_event_init_all_cpus();
7494        init_srcu_struct(&pmus_srcu);
7495        perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
7496        perf_pmu_register(&perf_cpu_clock, NULL, -1);
7497        perf_pmu_register(&perf_task_clock, NULL, -1);
7498        perf_tp_register();
7499        perf_cpu_notifier(perf_cpu_notify);
7500        register_reboot_notifier(&perf_reboot_notifier);
7501
7502        ret = init_hw_breakpoint();
7503        WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
7504
7505        /* do not patch jump label more than once per second */
7506        jump_label_rate_limit(&perf_sched_events, HZ);
7507
7508        /*
7509         * Build time assertion that we keep the data_head at the intended
7510         * location.  IOW, validate that we got the __reserved[] size right.
7511         */
7512        BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
7513                     != 1024);
7514}
7515
7516static int __init perf_event_sysfs_init(void)
7517{
7518        struct pmu *pmu;
7519        int ret;
7520
7521        mutex_lock(&pmus_lock);
7522
7523        ret = bus_register(&pmu_bus);
7524        if (ret)
7525                goto unlock;
7526
7527        list_for_each_entry(pmu, &pmus, entry) {
7528                if (!pmu->name || pmu->type < 0)
7529                        continue;
7530
7531                ret = pmu_dev_alloc(pmu);
7532                WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
7533        }
7534        pmu_bus_running = 1;
7535        ret = 0;
7536
7537unlock:
7538        mutex_unlock(&pmus_lock);
7539
7540        return ret;
7541}
7542device_initcall(perf_event_sysfs_init);
7543
7544#ifdef CONFIG_CGROUP_PERF
7545static struct cgroup_subsys_state *perf_cgroup_css_alloc(struct cgroup *cont)
7546{
7547        struct perf_cgroup *jc;
7548
7549        jc = kzalloc(sizeof(*jc), GFP_KERNEL);
7550        if (!jc)
7551                return ERR_PTR(-ENOMEM);
7552
7553        jc->info = alloc_percpu(struct perf_cgroup_info);
7554        if (!jc->info) {
7555                kfree(jc);
7556                return ERR_PTR(-ENOMEM);
7557        }
7558
7559        return &jc->css;
7560}
7561
7562static void perf_cgroup_css_free(struct cgroup *cont)
7563{
7564        struct perf_cgroup *jc;
7565        jc = container_of(cgroup_subsys_state(cont, perf_subsys_id),
7566                          struct perf_cgroup, css);
7567        free_percpu(jc->info);
7568        kfree(jc);
7569}
7570
7571static int __perf_cgroup_move(void *info)
7572{
7573        struct task_struct *task = info;
7574        perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
7575        return 0;
7576}
7577
7578static void perf_cgroup_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
7579{
7580        struct task_struct *task;
7581
7582        cgroup_taskset_for_each(task, cgrp, tset)
7583                task_function_call(task, __perf_cgroup_move, task);
7584}
7585
7586static void perf_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
7587                             struct task_struct *task)
7588{
7589        /*
7590         * cgroup_exit() is called in the copy_process() failure path.
7591         * Ignore this case since the task hasn't run yet; this avoids
7592         * trying to poke at half-freed task state from generic code.
7593         */
7594        if (!(task->flags & PF_EXITING))
7595                return;
7596
7597        task_function_call(task, __perf_cgroup_move, task);
7598}
7599
7600struct cgroup_subsys perf_subsys = {
7601        .name           = "perf_event",
7602        .subsys_id      = perf_subsys_id,
7603        .css_alloc      = perf_cgroup_css_alloc,
7604        .css_free       = perf_cgroup_css_free,
7605        .exit           = perf_cgroup_exit,
7606        .attach         = perf_cgroup_attach,
7607};
7608#endif /* CONFIG_CGROUP_PERF */
7609