linux/tools/perf/util/mmap.h
#ifndef __PERF_MMAP_H
#define __PERF_MMAP_H 1

#include <linux/compiler.h>
#include <linux/refcount.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <stdbool.h>
#include "auxtrace.h"
#include "event.h"

/**
 * struct perf_mmap - perf's ring buffer mmap details
 *
 * @refcnt: reference count, e.g. code using PERF_EVENT_IOC_SET_OUTPUT to share this
 */
struct perf_mmap {
        void             *base;
        int              mask;
        int              fd;
        int              cpu;
        refcount_t       refcnt;
        u64              prev;
        u64              start;
        u64              end;
        bool             overwrite;
        struct auxtrace_mmap auxtrace_mmap;
        char             event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8);
};
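
/*
 * Illustrative sketch, not part of the original header: the ring's data area
 * starts one page after @base (the first page is the control page), its size
 * is a power of two and @mask is that size minus one, so a running 64-bit
 * position such as @prev, @start or @end is turned into a buffer offset by
 * masking. perf_mmap__example_offset() is a hypothetical helper shown only
 * to make that relationship concrete.
 */
static inline u64 perf_mmap__example_offset(struct perf_mmap *map, u64 pos)
{
        return pos & map->mask;
}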

/*
 * State machine of bkw_mmap_state:
 *
 *                     .________________(forbid)_____________.
 *                     |                                     V
 * NOTREADY --(0)--> RUNNING --(1)--> DATA_PENDING --(2)--> EMPTY
 *                     ^  ^              |   ^               |
 *                     |  |__(forbid)____/   |___(forbid)___/|
 *                     |                                     |
 *                      \_________________(3)_______________/
 *
 * NOTREADY     : Backward ring buffers are not ready
 * RUNNING      : Backward ring buffers are recording
 * DATA_PENDING : Data needs to be collected from the backward ring buffers
 * EMPTY        : Data has been collected from the backward ring buffers
 *
 * (0): Set up the backward ring buffers
 * (1): Pause the ring buffers for reading
 * (2): Read from the ring buffers
 * (3): Resume the ring buffers for recording
 */
enum bkw_mmap_state {
        BKW_MMAP_NOTREADY,
        BKW_MMAP_RUNNING,
        BKW_MMAP_DATA_PENDING,
        BKW_MMAP_EMPTY,
};
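
/*
 * Illustrative sketch, not part of the original header:
 * bkw_mmap_state__example_transition_ok() is a hypothetical helper that just
 * restates the diagram above in code; only transitions (0)..(3) are legal,
 * everything marked "forbid" is rejected. The real transition handling lives
 * with the evlist code that owns the backward ring buffers.
 */
static inline bool
bkw_mmap_state__example_transition_ok(enum bkw_mmap_state old_state,
                                      enum bkw_mmap_state new_state)
{
        switch (old_state) {
        case BKW_MMAP_NOTREADY:
                return new_state == BKW_MMAP_RUNNING;           /* (0) */
        case BKW_MMAP_RUNNING:
                return new_state == BKW_MMAP_DATA_PENDING;      /* (1) */
        case BKW_MMAP_DATA_PENDING:
                return new_state == BKW_MMAP_EMPTY;             /* (2) */
        case BKW_MMAP_EMPTY:
                return new_state == BKW_MMAP_RUNNING;           /* (3) */
        default:
                return false;
        }
}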

struct mmap_params {
        int                         prot, mask;
        struct auxtrace_mmap_params auxtrace_mp;
};
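
/*
 * Illustrative sketch, not part of the original header: how a caller might
 * fill these parameters. mmap_params__example_init() and its arguments are
 * hypothetical; the real setup lives in the evlist mmap code. @prot is
 * typically PROT_READ | PROT_WRITE from <sys/mman.h>, or PROT_READ alone for
 * an overwritable (backward) ring buffer, and @mask is the size of the
 * power-of-two data area minus one.
 */
static inline void mmap_params__example_init(struct mmap_params *mp, int prot,
                                             size_t data_pages, size_t page_size)
{
        mp->prot = prot;
        mp->mask = (int)(data_pages * page_size - 1);
}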

int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int cpu);
void perf_mmap__munmap(struct perf_mmap *map);

void perf_mmap__get(struct perf_mmap *map);
void perf_mmap__put(struct perf_mmap *map);

void perf_mmap__consume(struct perf_mmap *map);
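
/*
 * Illustrative sketch, not part of the original header:
 * perf_mmap__example_share() is a hypothetical helper showing the reference
 * counting contract hinted at by @refcnt above: every additional user takes
 * a reference and drops it when done, and the buffer is unmapped once the
 * last reference is put.
 */
static inline void perf_mmap__example_share(struct perf_mmap *map)
{
        perf_mmap__get(map);    /* a second user now holds the map */
        /* ... use the map ... */
        perf_mmap__put(map);    /* the last put releases the mmap */
}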

static inline u64 perf_mmap__read_head(struct perf_mmap *mm)
{
        struct perf_event_mmap_page *pc = mm->base;
        u64 head = READ_ONCE(pc->data_head);

        /*
         * Pairs with the barrier the kernel issues after updating data_head:
         * make sure the ring data up to head is visible before it is read.
         */
        rmb();
        return head;
}

static inline void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
{
        struct perf_event_mmap_page *pc = md->base;

        /*
         * Ensure all reads are done before we write the tail out.
         */
        mb();
        pc->data_tail = tail;
}
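
/*
 * Illustrative sketch, not part of the original header:
 * perf_mmap__example_raw_drain() is a hypothetical helper showing how the
 * two barriers above pair up when consuming the ring by hand. Real consumers
 * should use the perf_mmap__read_*()/perf_mmap__consume() API declared below,
 * which also maintains @prev, @start and @end.
 */
static inline void perf_mmap__example_raw_drain(struct perf_mmap *map)
{
        /* rmb() in read_head: data written up to head is now visible. */
        u64 head = perf_mmap__read_head(map);

        /*
         * ... consume the bytes between the previously published tail and
         * head; the data area begins one page after @base ...
         */

        /* mb() in write_tail: our reads are done, the kernel may reuse the space. */
        perf_mmap__write_tail(map, head);
}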

union perf_event *perf_mmap__read_forward(struct perf_mmap *map);

union perf_event *perf_mmap__read_event(struct perf_mmap *map);

int perf_mmap__push(struct perf_mmap *md, void *to,
                    int push(void *to, void *buf, size_t size));

size_t perf_mmap__mmap_len(struct perf_mmap *map);

int perf_mmap__read_init(struct perf_mmap *md);
void perf_mmap__read_done(struct perf_mmap *map);
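
/*
 * Illustrative sketch, not part of the original header:
 * perf_mmap__example_read_all() is a hypothetical helper showing the usual
 * event loop built on the read API above, mirroring how the perf tools drain
 * one ring buffer.
 */
static inline void perf_mmap__example_read_all(struct perf_mmap *map)
{
        union perf_event *event;

        if (perf_mmap__read_init(map) < 0)
                return;         /* nothing new to read */

        while ((event = perf_mmap__read_event(map)) != NULL) {
                /* ... deliver the event ... */
                perf_mmap__consume(map);
        }

        perf_mmap__read_done(map);
}
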
#endif /* __PERF_MMAP_H */