/* linux/tools/perf/tests/open-syscall-all-cpus.c */
   1#include "evsel.h"
   2#include "tests.h"
   3#include "thread_map.h"
   4#include "cpumap.h"
   5#include "debug.h"
   6
   7int test__open_syscall_event_on_all_cpus(void)
   8{
   9        int err = -1, fd, cpu;
  10        struct cpu_map *cpus;
  11        struct perf_evsel *evsel;
  12        unsigned int nr_open_calls = 111, i;
  13        cpu_set_t cpu_set;
  14        struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
  15
  16        if (threads == NULL) {
  17                pr_debug("thread_map__new\n");
  18                return -1;
  19        }
  20
  21        cpus = cpu_map__new(NULL);
  22        if (cpus == NULL) {
  23                pr_debug("cpu_map__new\n");
  24                goto out_thread_map_delete;
  25        }
  26
  27        CPU_ZERO(&cpu_set);
  28
  29        evsel = perf_evsel__newtp("syscalls", "sys_enter_open");
  30        if (evsel == NULL) {
  31                pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
  32                goto out_thread_map_delete;
  33        }
  34
  35        if (perf_evsel__open(evsel, cpus, threads) < 0) {
  36                pr_debug("failed to open counter: %s, "
  37                         "tweak /proc/sys/kernel/perf_event_paranoid?\n",
  38                         strerror(errno));
  39                goto out_evsel_delete;
  40        }
  41
  42        for (cpu = 0; cpu < cpus->nr; ++cpu) {
  43                unsigned int ncalls = nr_open_calls + cpu;
  44                /*
  45                 * XXX eventually lift this restriction in a way that
  46                 * keeps perf building on older glibc installations
  47                 * without CPU_ALLOC. 1024 cpus in 2010 still seems
  48                 * a reasonable upper limit tho :-)
  49                 */
  50                if (cpus->map[cpu] >= CPU_SETSIZE) {
  51                        pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
  52                        continue;
  53                }
  54
  55                CPU_SET(cpus->map[cpu], &cpu_set);
  56                if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
  57                        pr_debug("sched_setaffinity() failed on CPU %d: %s ",
  58                                 cpus->map[cpu],
  59                                 strerror(errno));
  60                        goto out_close_fd;
  61                }
  62                for (i = 0; i < ncalls; ++i) {
  63                        fd = open("/etc/passwd", O_RDONLY);
  64                        close(fd);
  65                }
  66                CPU_CLR(cpus->map[cpu], &cpu_set);
  67        }
  68
  69        /*
  70         * Here we need to explicitely preallocate the counts, as if
  71         * we use the auto allocation it will allocate just for 1 cpu,
  72         * as we start by cpu 0.
  73         */
  74        if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) {
  75                pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
  76                goto out_close_fd;
  77        }
  78
  79        err = 0;
  80
  81        for (cpu = 0; cpu < cpus->nr; ++cpu) {
  82                unsigned int expected;
  83
  84                if (cpus->map[cpu] >= CPU_SETSIZE)
  85                        continue;
  86
  87                if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
  88                        pr_debug("perf_evsel__read_on_cpu\n");
  89                        err = -1;
  90                        break;
  91                }
  92
  93                expected = nr_open_calls + cpu;
  94                if (evsel->counts->cpu[cpu].val != expected) {
  95                        pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
  96                                 expected, cpus->map[cpu], evsel->counts->cpu[cpu].val);
  97                        err = -1;
  98                }
  99        }
 100
 101        perf_evsel__free_counts(evsel);
 102out_close_fd:
 103        perf_evsel__close_fd(evsel, 1, threads->nr);
 104out_evsel_delete:
 105        perf_evsel__delete(evsel);
 106out_thread_map_delete:
 107        thread_map__delete(threads);
 108        return err;
 109}
 110