linux/tools/perf/util/ordered-events.c
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include "ordered-events.h"
#include "session.h"
#include "asm/bug.h"
#include "debug.h"
#include "ui/progress.h"

#define pr_N(n, fmt, ...) \
        eprintf(n, debug_ordered_events, fmt, ##__VA_ARGS__)

#define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)

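/*
 * Insert 'new' into the time-ordered event list. The search starts at the
 * most recently queued event (oe->last) and walks forward or backward from
 * there, so insertion stays cheap for streams that arrive mostly in order.
 */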
static void queue_event(struct ordered_events *oe, struct ordered_event *new)
{
        struct ordered_event *last = oe->last;
        u64 timestamp = new->timestamp;
        struct list_head *p;

        ++oe->nr_events;
        oe->last = new;

        pr_oe_time2(timestamp, "queue_event nr_events %u\n", oe->nr_events);

        if (!last) {
                list_add(&new->list, &oe->events);
                oe->max_timestamp = timestamp;
                return;
        }

        /*
         * The last queued event may sit anywhere in the list. New events
         * usually arrive with timestamps close to it, so start the search
         * from there rather than from either end.
         */
        if (last->timestamp <= timestamp) {
                while (last->timestamp <= timestamp) {
                        p = last->list.next;
                        if (p == &oe->events) {
                                list_add_tail(&new->list, &oe->events);
                                oe->max_timestamp = timestamp;
                                return;
                        }
                        last = list_entry(p, struct ordered_event, list);
                }
                list_add_tail(&new->list, &last->list);
        } else {
                while (last->timestamp > timestamp) {
                        p = last->list.prev;
                        if (p == &oe->events) {
                                list_add(&new->list, &oe->events);
                                return;
                        }
                        last = list_entry(p, struct ordered_event, list);
                }
                list_add(&new->list, &last->list);
        }
}

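/*
 * With copy_on_queue set, the queue keeps its own copy of each event so the
 * caller's buffer can be reused immediately. __dup_event() refuses to copy
 * once cur_alloc_size has reached max_alloc_size; the caller is then
 * expected to flush (see OE_FLUSH__HALF in ordered_events__queue()) and
 * retry.
 */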
static union perf_event *__dup_event(struct ordered_events *oe,
                                     union perf_event *event)
{
        union perf_event *new_event = NULL;

        if (oe->cur_alloc_size < oe->max_alloc_size) {
                new_event = memdup(event, event->header.size);
                if (new_event)
                        oe->cur_alloc_size += event->header.size;
        }

        return new_event;
}

static union perf_event *dup_event(struct ordered_events *oe,
                                   union perf_event *event)
{
        return oe->copy_on_queue ? __dup_event(oe, event) : event;
}

static void free_dup_event(struct ordered_events *oe, union perf_event *event)
{
        if (event && oe->copy_on_queue) {
                oe->cur_alloc_size -= event->header.size;
                free(event);
        }
}

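/*
 * Get a free ordered_event node. Nodes come from three places, tried in
 * order: the cache of nodes recycled by ordered_events__delete(), the
 * current slab of MAX_SAMPLE_BUFFER entries, or a freshly malloc()ed slab.
 * Entry 0 of each slab is abused as the list node linking the slab into
 * oe->to_free, which is why indexing starts at 1.
 */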
#define MAX_SAMPLE_BUFFER       (64 * 1024 / sizeof(struct ordered_event))
static struct ordered_event *alloc_event(struct ordered_events *oe,
                                         union perf_event *event)
{
        struct list_head *cache = &oe->cache;
        struct ordered_event *new = NULL;
        union perf_event *new_event;

        new_event = dup_event(oe, event);
        if (!new_event)
                return NULL;

        if (!list_empty(cache)) {
                new = list_entry(cache->next, struct ordered_event, list);
                list_del(&new->list);
        } else if (oe->buffer) {
                new = oe->buffer + oe->buffer_idx;
                if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
                        oe->buffer = NULL;
        } else if (oe->cur_alloc_size < oe->max_alloc_size) {
                size_t size = MAX_SAMPLE_BUFFER * sizeof(*new);

                oe->buffer = malloc(size);
                if (!oe->buffer) {
                        free_dup_event(oe, new_event);
                        return NULL;
                }

                pr("alloc size %" PRIu64 "B (+%zu), max %" PRIu64 "B\n",
                   oe->cur_alloc_size, size, oe->max_alloc_size);

                oe->cur_alloc_size += size;
                list_add(&oe->buffer->list, &oe->to_free);

                /* First entry is abused to maintain the to_free list. */
                oe->buffer_idx = 2;
                new = oe->buffer + 1;
        } else {
                pr("allocation limit reached %" PRIu64 "B\n", oe->max_alloc_size);
                /*
                 * Bail out instead of falling through: 'new' is still NULL
                 * here, and the duplicated event would otherwise leak.
                 */
                free_dup_event(oe, new_event);
                return NULL;
        }

        new->event = new_event;
        return new;
}

static struct ordered_event *
ordered_events__new_event(struct ordered_events *oe, u64 timestamp,
                          union perf_event *event)
{
        struct ordered_event *new;

        new = alloc_event(oe, event);
        if (new) {
                new->timestamp = timestamp;
                queue_event(oe, new);
        }

        return new;
}

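/*
 * Return a delivered event's node to the cache for reuse and release the
 * duplicated event data, if any.
 */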
void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event)
{
        list_move(&event->list, &oe->cache);
        oe->nr_events--;
        free_dup_event(oe, event->event);
        event->event = NULL;
}

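/*
 * Queue one event for ordered delivery. A timestamp of 0 or ~0ULL is
 * rejected with -ETIME. If no node can be allocated because the memory
 * limit was reached, half of the queued events are flushed to make room
 * and the allocation is retried once.
 */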
int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
                          u64 timestamp, u64 file_offset)
{
        struct ordered_event *oevent;

        if (!timestamp || timestamp == ~0ULL)
                return -ETIME;

        if (timestamp < oe->last_flush) {
                pr_oe_time(timestamp,      "out of order event\n");
                pr_oe_time(oe->last_flush, "last flush, last_flush_type %d\n",
                           oe->last_flush_type);

                oe->nr_unordered_events++;
        }

        oevent = ordered_events__new_event(oe, timestamp, event);
        if (!oevent) {
                ordered_events__flush(oe, OE_FLUSH__HALF);
                oevent = ordered_events__new_event(oe, timestamp, event);
        }

        if (!oevent)
                return -ENOMEM;

        oevent->file_offset = file_offset;
        return 0;
}

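/*
 * Deliver every queued event with a timestamp up to and including
 * oe->next_flush, recycling the nodes as we go, and reposition oe->last
 * if it was flushed out. A limit of ULLONG_MAX means a final flush,
 * which is slow enough to deserve a progress bar.
 */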
static int __ordered_events__flush(struct ordered_events *oe)
{
        struct list_head *head = &oe->events;
        struct ordered_event *tmp, *iter;
        u64 limit = oe->next_flush;
        u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
        bool show_progress = limit == ULLONG_MAX;
        struct ui_progress prog;
        int ret;

        if (!limit)
                return 0;

        if (show_progress)
                ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");

        list_for_each_entry_safe(iter, tmp, head, list) {
                if (session_done())
                        return 0;

                if (iter->timestamp > limit)
                        break;
                ret = oe->deliver(oe, iter);
                if (ret)
                        return ret;

                ordered_events__delete(oe, iter);
                oe->last_flush = iter->timestamp;

                if (show_progress)
                        ui_progress__update(&prog, 1);
        }

        if (list_empty(head))
                oe->last = NULL;
        else if (last_ts <= limit)
                oe->last = list_entry(head->prev, struct ordered_event, list);

        if (show_progress)
                ui_progress__finish();

        return 0;
}

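/*
 * Flush according to 'how': FINAL delivers everything still queued, HALF
 * delivers everything up to halfway between the oldest and the most
 * recently queued timestamp (used when the allocation limit is hit),
 * ROUND delivers up to the next_flush recorded by the previous round and
 * then advances it to max_timestamp, and NONE flushes up to whatever
 * next_flush already holds.
 */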
int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
{
        static const char * const str[] = {
                "NONE",
                "FINAL",
                "ROUND",
                "HALF ",
        };
        int err;

        if (oe->nr_events == 0)
                return 0;

        switch (how) {
        case OE_FLUSH__FINAL:
                oe->next_flush = ULLONG_MAX;
                break;

        case OE_FLUSH__HALF:
        {
                struct ordered_event *first, *last;
                struct list_head *head = &oe->events;

                first = list_entry(head->next, struct ordered_event, list);
                last = oe->last;

                /* Warn if we are called before any event got allocated. */
                if (WARN_ONCE(!last || list_empty(head), "empty queue"))
                        return 0;

                oe->next_flush  = first->timestamp;
                oe->next_flush += (last->timestamp - first->timestamp) / 2;
                break;
        }

        case OE_FLUSH__ROUND:
        case OE_FLUSH__NONE:
        default:
                break;
        }

        pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush PRE  %s, nr_events %u\n",
                   str[how], oe->nr_events);
        pr_oe_time(oe->max_timestamp, "max_timestamp\n");

        err = __ordered_events__flush(oe);

        if (!err) {
                if (how == OE_FLUSH__ROUND)
                        oe->next_flush = oe->max_timestamp;

                oe->last_flush_type = how;
        }

        pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush POST %s, nr_events %u\n",
                   str[how], oe->nr_events);
        pr_oe_time(oe->last_flush, "last_flush\n");

        return err;
}

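/*
 * A minimal usage sketch; process_event() and read_event() stand in for
 * caller-provided pieces, only the ordered_events__* calls are this API:
 *
 *      static int deliver(struct ordered_events *oe,
 *                         struct ordered_event *event)
 *      {
 *              return process_event(event->event);
 *      }
 *
 *      ordered_events__init(&oe, deliver);
 *      while (!read_event(&event, &timestamp, &offset))
 *              ordered_events__queue(&oe, event, timestamp, offset);
 *      ordered_events__flush(&oe, OE_FLUSH__FINAL);
 *      ordered_events__free(&oe);
 */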
void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver)
{
        INIT_LIST_HEAD(&oe->events);
        INIT_LIST_HEAD(&oe->cache);
        INIT_LIST_HEAD(&oe->to_free);
        oe->max_alloc_size = (u64) -1;
        oe->cur_alloc_size = 0;
        oe->deliver        = deliver;
}

void ordered_events__free(struct ordered_events *oe)
{
        while (!list_empty(&oe->to_free)) {
                struct ordered_event *event;

                event = list_entry(oe->to_free.next, struct ordered_event, list);
                list_del(&event->list);
                free_dup_event(oe, event->event);
                free(event);
        }
}

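/*
 * Drop everything queued and start over, keeping only the deliver callback
 * across the reset.
 */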
void ordered_events__reinit(struct ordered_events *oe)
{
        ordered_events__deliver_t old_deliver = oe->deliver;

        ordered_events__free(oe);
        memset(oe, '\0', sizeof(*oe));
        ordered_events__init(oe, old_deliver);
}