linux/tools/perf/arch/x86/tests/intel-cqm.c
#include "tests/tests.h"
#include "perf.h"
#include "cloexec.h"
#include "debug.h"
#include "evlist.h"
#include "evsel.h"
#include "arch-tests.h"

#include <errno.h>
#include <signal.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

static pid_t spawn(void)
{
	pid_t pid;

	pid = fork();
	if (pid)
		return pid;

	/* Child: sleep forever; the parent profiles us by pid. */
	while (1)
		sleep(5);
	return 0;
}

/*
 * Create an event group that contains both a sampled hardware event
 * (cpu-cycles) and an intel_cqm/llc_occupancy/ event. The CQM PMU
 * registers with a software event context, which is what allows it to
 * share a group with a hardware event in the first place. We then wait
 * for the hardware perf counter to overflow and generate a PMI, which
 * triggers an event read for both of the events in the group.
 *
 * Since reading Intel CQM event counters requires sending SMP IPIs, the
 * CQM pmu needs to handle the above situation gracefully, and return
 * the last read counter value to avoid triggering a WARN_ON_ONCE() in
 * smp_call_function_many() caused by sending IPIs from NMI context.
 * A sketch of that read-side pattern follows.
 */
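
/*
 * A minimal sketch of that pattern, not the actual kernel
 * implementation: cqm_read_rmid_on_package() is a hypothetical
 * stand-in for the IPI-based counter read, while in_interrupt() and
 * local64_read() are real kernel APIs.
 *
 *	static u64 cqm_event_count_sketch(struct perf_event *event)
 *	{
 *		if (unlikely(in_interrupt()))
 *			return local64_read(&event->count);
 *
 *		cqm_read_rmid_on_package(event);
 *		return local64_read(&event->count);
 *	}
 *
 * The early return hands back the last value read from process
 * context instead of calling smp_call_function_many() in NMI context.
 */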
int test__intel_cqm_count_nmi_context(int subtest __maybe_unused)
{
	struct perf_evlist *evlist = NULL;
	struct perf_evsel *evsel = NULL;
	struct perf_event_attr pe;
	int i, fd[2], flag, ret;
	size_t mmap_len;
	void *event;
	pid_t pid;
	int err = TEST_FAIL;

	flag = perf_event_open_cloexec_flag();

	evlist = perf_evlist__new();
	if (!evlist) {
		pr_debug("perf_evlist__new failed\n");
		return TEST_FAIL;
	}

	ret = parse_events(evlist, "intel_cqm/llc_occupancy/", NULL);
	if (ret) {
		pr_debug("parse_events failed, is \"intel_cqm/llc_occupancy/\" available?\n");
		err = TEST_SKIP;
		goto out;
	}
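
	/*
	 * The evsel parsed above carries the dynamic PMU type and config
	 * that "intel_cqm/llc_occupancy/" resolved to; both are reused
	 * below for the raw sys_perf_event_open() call.
	 */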
	evsel = perf_evlist__first(evlist);
	if (!evsel) {
		pr_debug("perf_evlist__first failed\n");
		goto out;
	}

	memset(&pe, 0, sizeof(pe));
	pe.size = sizeof(pe);

	pe.type = PERF_TYPE_HARDWARE;
	pe.config = PERF_COUNT_HW_CPU_CYCLES;
	pe.read_format = PERF_FORMAT_GROUP;

	/*
	 * A tiny period makes cycles overflow, and thus deliver a PMI,
	 * almost immediately and then continuously.
	 */
	pe.sample_period = 128;
	pe.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_READ;

	pid = spawn();
	if (pid < 0) {
		pr_debug("fork failed\n");
		goto out;
	}

	fd[0] = sys_perf_event_open(&pe, pid, -1, -1, flag);
	if (fd[0] < 0) {
		pr_debug("failed to open cycles event\n");
		goto out_kill;
	}

	memset(&pe, 0, sizeof(pe));
	pe.size = sizeof(pe);

	pe.type = evsel->attr.type;
	pe.config = evsel->attr.config;
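
	/*
	 * Passing fd[0] as group_fd puts llc_occupancy in the cycles
	 * group, so the PMI-driven group read covers both events.
	 */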
	fd[1] = sys_perf_event_open(&pe, pid, -1, fd[0], flag);
	if (fd[1] < 0) {
		pr_debug("failed to open llc_occupancy event\n");
		close(fd[0]);
		goto out_kill;
	}

	/*
	 * The perf mmap buffer must be a power-of-two number of data
	 * pages (64 here) plus one page for the meta-data page
	 * (struct perf_event_mmap_page). See tools/perf/design.txt.
	 */
	mmap_len = page_size * 65;

	event = mmap(NULL, mmap_len, PROT_READ, MAP_SHARED, fd[0], 0);
	if (event == MAP_FAILED) {
		pr_debug("failed to mmap, errno %d\n", errno);
		goto out_close;
	}
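
	/*
	 * Let the child run for a while: every 128 cycle events the
	 * counter overflows, the PMI fires and the whole group,
	 * llc_occupancy included, is read from NMI context. A PMU that
	 * sent IPIs here would splat in the kernel log.
	 */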
	sleep(1);

	err = TEST_OK;

	munmap(event, mmap_len);
out_close:
	for (i = 0; i < 2; i++)
		close(fd[i]);
out_kill:
	kill(pid, SIGKILL);
	wait(NULL);
out:
	perf_evlist__delete(evlist);
	return err;
}
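
/*
 * Presumably wired up via the x86 arch_tests[] table in
 * tools/perf/arch/x86/tests/arch-tests.c (declared in arch-tests.h),
 * so it runs as one of the architecture-specific "perf test" entries.
 */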