linux/tools/perf/util/ordered-events.c
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include "ordered-events.h"
#include "session.h"
#include "asm/bug.h"
#include "debug.h"

#define pr_N(n, fmt, ...) \
        eprintf(n, debug_ordered_events, fmt, ##__VA_ARGS__)

#define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)

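/*
 * Insert @new into oe->events, which is kept sorted by timestamp.
 * The search starts at the most recently queued event (oe->last) and
 * walks forward or backward from there, on the assumption that
 * consecutive events tend to carry nearby timestamps.
 */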
static void queue_event(struct ordered_events *oe, struct ordered_event *new)
{
        struct ordered_event *last = oe->last;
        u64 timestamp = new->timestamp;
        struct list_head *p;

        ++oe->nr_events;
        oe->last = new;

        pr_oe_time2(timestamp, "queue_event nr_events %u\n", oe->nr_events);

        if (!last) {
                list_add(&new->list, &oe->events);
                oe->max_timestamp = timestamp;
                return;
        }

        /*
         * last event might point to some random place in the list as it's
         * the last queued event. We expect that the new event is close to
         * this.
         */
        if (last->timestamp <= timestamp) {
                while (last->timestamp <= timestamp) {
                        p = last->list.next;
                        if (p == &oe->events) {
                                list_add_tail(&new->list, &oe->events);
                                oe->max_timestamp = timestamp;
                                return;
                        }
                        last = list_entry(p, struct ordered_event, list);
                }
                list_add_tail(&new->list, &last->list);
        } else {
                while (last->timestamp > timestamp) {
                        p = last->list.prev;
                        if (p == &oe->events) {
                                list_add(&new->list, &oe->events);
                                return;
                        }
                        last = list_entry(p, struct ordered_event, list);
                }
                list_add(&new->list, &last->list);
        }
}

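/* Duplicate @event, but only while we are under the allocation limit. */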
static union perf_event *__dup_event(struct ordered_events *oe,
                                     union perf_event *event)
{
        union perf_event *new_event = NULL;

        if (oe->cur_alloc_size < oe->max_alloc_size) {
                new_event = memdup(event, event->header.size);
                if (new_event)
                        oe->cur_alloc_size += event->header.size;
        }

        return new_event;
}

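/*
 * With copy_on_queue set the event has to outlive the caller's buffer,
 * so take a private copy; otherwise just keep the original pointer.
 */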
static union perf_event *dup_event(struct ordered_events *oe,
                                   union perf_event *event)
{
        return oe->copy_on_queue ? __dup_event(oe, event) : event;
}

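/* Release a copy taken by dup_event() and account for the freed bytes. */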
static void free_dup_event(struct ordered_events *oe, union perf_event *event)
{
        if (event && oe->copy_on_queue) {
                oe->cur_alloc_size -= event->header.size;
                free(event);
        }
}

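/*
 * Ordered events are allocated in ~64kB chunks of MAX_SAMPLE_BUFFER
 * entries. alloc_event() first recycles entries from the cache of
 * already delivered events, then carves entries out of the current
 * chunk, and only mallocs a new chunk when that runs out, all while
 * respecting the max_alloc_size limit.
 */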
#define MAX_SAMPLE_BUFFER       (64 * 1024 / sizeof(struct ordered_event))
static struct ordered_event *alloc_event(struct ordered_events *oe,
                                         union perf_event *event)
{
        struct list_head *cache = &oe->cache;
        struct ordered_event *new = NULL;
        union perf_event *new_event;

        new_event = dup_event(oe, event);
        if (!new_event)
                return NULL;

        if (!list_empty(cache)) {
                new = list_entry(cache->next, struct ordered_event, list);
                list_del(&new->list);
        } else if (oe->buffer) {
                new = oe->buffer + oe->buffer_idx;
                if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
                        oe->buffer = NULL;
        } else if (oe->cur_alloc_size < oe->max_alloc_size) {
                size_t size = MAX_SAMPLE_BUFFER * sizeof(*new);

                oe->buffer = malloc(size);
                if (!oe->buffer) {
                        free_dup_event(oe, new_event);
                        return NULL;
                }

                pr("alloc size %" PRIu64 "B (+%zu), max %" PRIu64 "B\n",
                   oe->cur_alloc_size, size, oe->max_alloc_size);

                oe->cur_alloc_size += size;
                list_add(&oe->buffer->list, &oe->to_free);

                /*
                 * The first entry is abused to maintain the to_free list;
                 * clear its event pointer so ordered_events__free() does
                 * not hand uninitialized memory to free_dup_event().
                 */
                oe->buffer->event = NULL;
                oe->buffer_idx = 2;
                new = oe->buffer + 1;
        } else {
                pr("allocation limit reached %" PRIu64 "B\n", oe->max_alloc_size);
                free_dup_event(oe, new_event);
                return NULL;
        }

        new->event = new_event;
        return new;
}

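/* Allocate an ordered_event for @event and queue it in timestamp order. */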
static struct ordered_event *
ordered_events__new_event(struct ordered_events *oe, u64 timestamp,
                    union perf_event *event)
{
        struct ordered_event *new;

        new = alloc_event(oe, event);
        if (new) {
                new->timestamp = timestamp;
                queue_event(oe, new);
        }

        return new;
}

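/*
 * Return @event to the cache for reuse and release its (possibly
 * duplicated) perf_event payload.
 */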
void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event)
{
        list_move(&event->list, &oe->cache);
        oe->nr_events--;
        free_dup_event(oe, event->event);
        event->event = NULL;
}

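/*
 * Queue @event for ordered delivery. Events without a sane timestamp
 * cannot be ordered and are rejected with -ETIME. If allocation fails,
 * flush the older half of the queue to reclaim space and retry once.
 */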
int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
                          struct perf_sample *sample, u64 file_offset)
{
        u64 timestamp = sample->time;
        struct ordered_event *oevent;

        if (!timestamp || timestamp == ~0ULL)
                return -ETIME;

        if (timestamp < oe->last_flush) {
                pr_oe_time(timestamp,      "out of order event\n");
                pr_oe_time(oe->last_flush, "last flush, last_flush_type %d\n",
                           oe->last_flush_type);

                oe->nr_unordered_events++;
        }

        oevent = ordered_events__new_event(oe, timestamp, event);
        if (!oevent) {
                ordered_events__flush(oe, OE_FLUSH__HALF);
                oevent = ordered_events__new_event(oe, timestamp, event);
        }

        if (!oevent)
                return -ENOMEM;

        oevent->file_offset = file_offset;
        return 0;
}

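/*
 * Deliver every queued event with a timestamp up to oe->next_flush, in
 * timestamp order, returning each one to the cache after delivery.
 */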
static int __ordered_events__flush(struct ordered_events *oe)
{
        struct list_head *head = &oe->events;
        struct ordered_event *tmp, *iter;
        u64 limit = oe->next_flush;
        u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
        bool show_progress = limit == ULLONG_MAX;
        struct ui_progress prog;
        int ret;

        if (!limit)
                return 0;

        if (show_progress)
                ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");

        list_for_each_entry_safe(iter, tmp, head, list) {
                if (session_done())
                        return 0;

                if (iter->timestamp > limit)
                        break;
                ret = oe->deliver(oe, iter);
                if (ret)
                        return ret;

                ordered_events__delete(oe, iter);
                oe->last_flush = iter->timestamp;

                if (show_progress)
                        ui_progress__update(&prog, 1);
        }

        if (list_empty(head))
                oe->last = NULL;
        else if (last_ts <= limit)
                oe->last = list_entry(head->prev, struct ordered_event, list);

        if (show_progress)
                ui_progress__finish();

        return 0;
}

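/*
 * Compute the flush limit for the requested strategy and deliver all
 * events at or before it: FINAL flushes everything, HALF flushes the
 * older half of the queued time range, and ROUND reuses the limit
 * recorded at the end of the previous round (its max_timestamp).
 */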
int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
{
        static const char * const str[] = {
                "NONE",
                "FINAL",
                "ROUND",
                "HALF ",
        };
        int err;

        if (oe->nr_events == 0)
                return 0;

        switch (how) {
        case OE_FLUSH__FINAL:
                oe->next_flush = ULLONG_MAX;
                break;

        case OE_FLUSH__HALF:
        {
                struct ordered_event *first, *last;
                struct list_head *head = &oe->events;

                first = list_entry(head->next, struct ordered_event, list);
                last = oe->last;

                /* Warn if we are called before any event got allocated. */
                if (WARN_ONCE(!last || list_empty(head), "empty queue"))
                        return 0;

                oe->next_flush  = first->timestamp;
                oe->next_flush += (last->timestamp - first->timestamp) / 2;
                break;
        }

        case OE_FLUSH__ROUND:
        case OE_FLUSH__NONE:
        default:
                break;
        }

        pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush PRE  %s, nr_events %u\n",
                   str[how], oe->nr_events);
        pr_oe_time(oe->max_timestamp, "max_timestamp\n");

        err = __ordered_events__flush(oe);

        if (!err) {
                if (how == OE_FLUSH__ROUND)
                        oe->next_flush = oe->max_timestamp;

                oe->last_flush_type = how;
        }

        pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush POST %s, nr_events %u\n",
                   str[how], oe->nr_events);
        pr_oe_time(oe->last_flush, "last_flush\n");

        return err;
}

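/*
 * Initialize @oe with an effectively unlimited allocation budget and
 * the @deliver callback used by the flush paths.
 *
 * Typical usage (a sketch, not code from this file): feed events in,
 * flush rounds as data arrives, and finish with a FINAL flush:
 *
 *      ordered_events__init(&oe, deliver);
 *      oe.copy_on_queue = true;
 *      ordered_events__queue(&oe, event, sample, file_offset);
 *      ordered_events__flush(&oe, OE_FLUSH__ROUND);
 *      ...
 *      ordered_events__flush(&oe, OE_FLUSH__FINAL);
 *      ordered_events__free(&oe);
 */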
void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver)
{
        INIT_LIST_HEAD(&oe->events);
        INIT_LIST_HEAD(&oe->cache);
        INIT_LIST_HEAD(&oe->to_free);
        oe->max_alloc_size = (u64) -1;
        oe->cur_alloc_size = 0;
        oe->deliver        = deliver;
}

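/*
 * Free every allocated chunk. The first entry of each chunk doubles as
 * the to_free list node, so freeing it releases the whole chunk.
 */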
void ordered_events__free(struct ordered_events *oe)
{
        while (!list_empty(&oe->to_free)) {
                struct ordered_event *event;

                event = list_entry(oe->to_free.next, struct ordered_event, list);
                list_del(&event->list);
                free_dup_event(oe, event->event);
                free(event);
        }
}

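/* Drop all queued events and reset @oe, keeping the deliver callback. */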
void ordered_events__reinit(struct ordered_events *oe)
{
        ordered_events__deliver_t old_deliver = oe->deliver;

        ordered_events__free(oe);
        memset(oe, '\0', sizeof(*oe));
        ordered_events__init(oe, old_deliver);
}