linux/tools/perf/util/ordered-events.c
#include <errno.h>
#include <inttypes.h>
#include <stdlib.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include "ordered-events.h"
#include "session.h"
#include "asm/bug.h"
#include "debug.h"
#include "ui/progress.h"

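/*
 * Debug helpers: pr_N() prints when the 'ordered-events' debug variable
 * is at least @n, pr() is the level 1 shorthand.
 */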
#define pr_N(n, fmt, ...) \
        eprintf(n, debug_ordered_events, fmt, ##__VA_ARGS__)

#define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)

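/*
 * Insert @new into the time ordered oe->events list. The search starts
 * from the last queued event, which is usually close to the right spot,
 * and walks forward or backward from there.
 */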
static void queue_event(struct ordered_events *oe, struct ordered_event *new)
{
        struct ordered_event *last = oe->last;
        u64 timestamp = new->timestamp;
        struct list_head *p;

        ++oe->nr_events;
        oe->last = new;

        pr_oe_time2(timestamp, "queue_event nr_events %u\n", oe->nr_events);

        if (!last) {
                list_add(&new->list, &oe->events);
                oe->max_timestamp = timestamp;
                return;
        }

        /*
         * The last queued event might point anywhere in the list. We
         * expect the new event's timestamp to be close to it, so start
         * the search there.
         */
        if (last->timestamp <= timestamp) {
                while (last->timestamp <= timestamp) {
                        p = last->list.next;
                        if (p == &oe->events) {
                                list_add_tail(&new->list, &oe->events);
                                oe->max_timestamp = timestamp;
                                return;
                        }
                        last = list_entry(p, struct ordered_event, list);
                }
                list_add_tail(&new->list, &last->list);
        } else {
                while (last->timestamp > timestamp) {
                        p = last->list.prev;
                        if (p == &oe->events) {
                                list_add(&new->list, &oe->events);
                                return;
                        }
                        last = list_entry(p, struct ordered_event, list);
                }
                list_add(&new->list, &last->list);
        }
}

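/*
 * Duplicate the event data so it remains valid after the caller's
 * buffer is reused. The copy is charged against oe->max_alloc_size.
 */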
static union perf_event *__dup_event(struct ordered_events *oe,
                                     union perf_event *event)
{
        union perf_event *new_event = NULL;

        if (oe->cur_alloc_size < oe->max_alloc_size) {
                new_event = memdup(event, event->header.size);
                if (new_event)
                        oe->cur_alloc_size += event->header.size;
        }

        return new_event;
}

static union perf_event *dup_event(struct ordered_events *oe,
                                   union perf_event *event)
{
        return oe->copy_on_queue ? __dup_event(oe, event) : event;
}

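/* Free an event copied by dup_event() and return its size to the budget. */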
static void free_dup_event(struct ordered_events *oe, union perf_event *event)
{
        if (event && oe->copy_on_queue) {
                oe->cur_alloc_size -= event->header.size;
                free(event);
        }
}

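/*
 * Allocate an ordered_event, trying in order: an entry recycled from the
 * cache list, a free slot in the current buffer, and, while still under
 * the allocation limit, a freshly malloc()ed buffer of MAX_SAMPLE_BUFFER
 * entries.
 */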
#define MAX_SAMPLE_BUFFER       (64 * 1024 / sizeof(struct ordered_event))
static struct ordered_event *alloc_event(struct ordered_events *oe,
                                         union perf_event *event)
{
        struct list_head *cache = &oe->cache;
        struct ordered_event *new = NULL;
        union perf_event *new_event;

        new_event = dup_event(oe, event);
        if (!new_event)
                return NULL;

        if (!list_empty(cache)) {
                new = list_entry(cache->next, struct ordered_event, list);
                list_del(&new->list);
        } else if (oe->buffer) {
                new = oe->buffer + oe->buffer_idx;
                if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
                        oe->buffer = NULL;
        } else if (oe->cur_alloc_size < oe->max_alloc_size) {
                size_t size = MAX_SAMPLE_BUFFER * sizeof(*new);

                oe->buffer = malloc(size);
                if (!oe->buffer) {
                        free_dup_event(oe, new_event);
                        return NULL;
                }

                pr("alloc size %" PRIu64 "B (+%zu), max %" PRIu64 "B\n",
                   oe->cur_alloc_size, size, oe->max_alloc_size);

                oe->cur_alloc_size += size;
                list_add(&oe->buffer->list, &oe->to_free);

                /* First entry is abused to maintain the to_free list. */
                oe->buffer_idx = 2;
                new = oe->buffer + 1;

                /*
                 * The first entry never carries an event; clear its event
                 * pointer so ordered_events__free() does not free a stale
                 * pointer from the uninitialized malloc() memory.
                 */
                oe->buffer->event = NULL;
        } else {
                pr("allocation limit reached %" PRIu64 "B\n", oe->max_alloc_size);
                /*
                 * 'new' is still NULL here; falling through would
                 * dereference it, so drop the copy and fail.
                 */
                free_dup_event(oe, new_event);
                return NULL;
        }

        new->event = new_event;
        return new;
}

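/*
 * Allocate an ordered_event for @event, stamp it with @timestamp and
 * link it into the time ordered queue.
 */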
static struct ordered_event *
ordered_events__new_event(struct ordered_events *oe, u64 timestamp,
                          union perf_event *event)
{
        struct ordered_event *new;

        new = alloc_event(oe, event);
        if (new) {
                new->timestamp = timestamp;
                queue_event(oe, new);
        }

        return new;
}

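/* Move @event back to the cache for reuse and drop its duplicated data. */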
void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event)
{
        list_move(&event->list, &oe->cache);
        oe->nr_events--;
        free_dup_event(oe, event->event);
        event->event = NULL;
}

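/*
 * Queue @event by its sample time. Events without a usable timestamp are
 * rejected with -ETIME. If allocation fails, half of the queue is flushed
 * to make room and the allocation is retried once.
 */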
int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
                          struct perf_sample *sample, u64 file_offset)
{
        u64 timestamp = sample->time;
        struct ordered_event *oevent;

        if (!timestamp || timestamp == ~0ULL)
                return -ETIME;

        if (timestamp < oe->last_flush) {
                pr_oe_time(timestamp,      "out of order event\n");
                pr_oe_time(oe->last_flush, "last flush, last_flush_type %d\n",
                           oe->last_flush_type);

                oe->nr_unordered_events++;
        }

        oevent = ordered_events__new_event(oe, timestamp, event);
        if (!oevent) {
                /* Flush half of the queue to make room and retry once. */
                ordered_events__flush(oe, OE_FLUSH__HALF);
                oevent = ordered_events__new_event(oe, timestamp, event);
        }

        if (!oevent)
                return -ENOMEM;

        oevent->file_offset = file_offset;
        return 0;
}

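/*
 * Deliver and recycle every queued event with a timestamp up to
 * oe->next_flush. A progress bar is shown only for the final,
 * unbounded flush.
 */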
static int __ordered_events__flush(struct ordered_events *oe)
{
        struct list_head *head = &oe->events;
        struct ordered_event *tmp, *iter;
        u64 limit = oe->next_flush;
        u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
        bool show_progress = limit == ULLONG_MAX;
        struct ui_progress prog;
        int ret;

        if (!limit)
                return 0;

        if (show_progress)
                ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");

        list_for_each_entry_safe(iter, tmp, head, list) {
                if (session_done())
                        return 0;

                if (iter->timestamp > limit)
                        break;

                ret = oe->deliver(oe, iter);
                if (ret)
                        return ret;

                ordered_events__delete(oe, iter);
                oe->last_flush = iter->timestamp;

                if (show_progress)
                        ui_progress__update(&prog, 1);
        }

        if (list_empty(head))
                oe->last = NULL;
        else if (last_ts <= limit)
                oe->last = list_entry(head->prev, struct ordered_event, list);

        if (show_progress)
                ui_progress__finish();

        return 0;
}

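/*
 * Set the flush limit for the requested mode and run the flush: FINAL
 * drains the whole queue, HALF flushes up to the queue's midpoint
 * timestamp, and ROUND reuses the limit recorded at the end of the
 * previous round (oe->max_timestamp).
 */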
int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
{
        static const char * const str[] = {
                "NONE",
                "FINAL",
                "ROUND",
                "HALF ",
        };
        int err;

        if (oe->nr_events == 0)
                return 0;

        switch (how) {
        case OE_FLUSH__FINAL:
                oe->next_flush = ULLONG_MAX;
                break;

        case OE_FLUSH__HALF:
        {
                struct ordered_event *first, *last;
                struct list_head *head = &oe->events;

                first = list_entry(head->next, struct ordered_event, list);
                last = oe->last;

                /* Warn if we are called before any event got allocated. */
                if (WARN_ONCE(!last || list_empty(head), "empty queue"))
                        return 0;

                /* Flush up to the timestamp halfway through the queue. */
                oe->next_flush  = first->timestamp;
                oe->next_flush += (last->timestamp - first->timestamp) / 2;
                break;
        }

        case OE_FLUSH__ROUND:
        case OE_FLUSH__NONE:
        default:
                break;
        }

        pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush PRE  %s, nr_events %u\n",
                   str[how], oe->nr_events);
        pr_oe_time(oe->max_timestamp, "max_timestamp\n");

        err = __ordered_events__flush(oe);

        if (!err) {
                if (how == OE_FLUSH__ROUND)
                        oe->next_flush = oe->max_timestamp;

                oe->last_flush_type = how;
        }

        pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush POST %s, nr_events %u\n",
                   str[how], oe->nr_events);
        pr_oe_time(oe->last_flush, "last_flush\n");

        return err;
}

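/*
 * Initialize @oe with empty lists, an unlimited allocation budget and
 * the callback used to deliver flushed events.
 */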
void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver)
{
        INIT_LIST_HEAD(&oe->events);
        INIT_LIST_HEAD(&oe->cache);
        INIT_LIST_HEAD(&oe->to_free);
        oe->max_alloc_size = (u64) -1;
        oe->cur_alloc_size = 0;
        oe->deliver        = deliver;
}

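/*
 * Release every buffer on the to_free list. Each entry is the first slot
 * of a malloc()ed buffer, so freeing it releases the whole array; its
 * event pointer is NULL (see alloc_event()), so free_dup_event() is a
 * no-op here.
 */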
void ordered_events__free(struct ordered_events *oe)
{
        while (!list_empty(&oe->to_free)) {
                struct ordered_event *event;

                event = list_entry(oe->to_free.next, struct ordered_event, list);
                list_del(&event->list);
                free_dup_event(oe, event->event);
                free(event);
        }
}