linux/tools/perf/util/ordered-events.c
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/string.h>
#include "ordered-events.h"
#include "session.h"
#include "asm/bug.h"
#include "debug.h"
#include "ui/progress.h"

#define pr_N(n, fmt, ...) \
        eprintf(n, debug_ordered_events, fmt, ##__VA_ARGS__)

#define pr(fmt, ...) pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)

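/*
 * Link the new event into the time ordered 'events' list. The search
 * starts from the most recently queued event (oe->last) and walks
 * forward or backward from there, since consecutive events usually
 * carry nearby timestamps.
 */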
static void queue_event(struct ordered_events *oe, struct ordered_event *new)
{
        struct ordered_event *last = oe->last;
        u64 timestamp = new->timestamp;
        struct list_head *p;

        ++oe->nr_events;
        oe->last = new;

        pr_oe_time2(timestamp, "queue_event nr_events %u\n", oe->nr_events);

        if (!last) {
                list_add(&new->list, &oe->events);
                oe->max_timestamp = timestamp;
                return;
        }

        /*
         * last event might point to some random place in the list as it's
         * the last queued event. We expect that the new event is close to
         * this.
         */
        if (last->timestamp <= timestamp) {
                while (last->timestamp <= timestamp) {
                        p = last->list.next;
                        if (p == &oe->events) {
                                list_add_tail(&new->list, &oe->events);
                                oe->max_timestamp = timestamp;
                                return;
                        }
                        last = list_entry(p, struct ordered_event, list);
                }
                list_add_tail(&new->list, &last->list);
        } else {
                while (last->timestamp > timestamp) {
                        p = last->list.prev;
                        if (p == &oe->events) {
                                list_add(&new->list, &oe->events);
                                return;
                        }
                        last = list_entry(p, struct ordered_event, list);
                }
                list_add(&new->list, &last->list);
        }
}

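/*
 * Duplicate the event payload when 'copy_on_queue' is set, charging the
 * copy against 'cur_alloc_size' so it counts toward 'max_alloc_size'.
 * Without 'copy_on_queue' the caller's event pointer is used directly.
 */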
static union perf_event *__dup_event(struct ordered_events *oe,
                                     union perf_event *event)
{
        union perf_event *new_event = NULL;

        if (oe->cur_alloc_size < oe->max_alloc_size) {
                new_event = memdup(event, event->header.size);
                if (new_event)
                        oe->cur_alloc_size += event->header.size;
        }

        return new_event;
}

static union perf_event *dup_event(struct ordered_events *oe,
                                   union perf_event *event)
{
        return oe->copy_on_queue ? __dup_event(oe, event) : event;
}

static void __free_dup_event(struct ordered_events *oe, union perf_event *event)
{
        if (event) {
                oe->cur_alloc_size -= event->header.size;
                free(event);
        }
}

static void free_dup_event(struct ordered_events *oe, union perf_event *event)
{
        if (oe->copy_on_queue)
                __free_dup_event(oe, event);
}

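/* Size each allocation buffer so its event array fills roughly 64K. */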
#define MAX_SAMPLE_BUFFER	(64 * 1024 / sizeof(struct ordered_event))
static struct ordered_event *alloc_event(struct ordered_events *oe,
                                         union perf_event *event)
{
        struct list_head *cache = &oe->cache;
        struct ordered_event *new = NULL;
        union perf_event *new_event;
        size_t size;

        new_event = dup_event(oe, event);
        if (!new_event)
                return NULL;

        /*
         * We maintain the following scheme of buffers for ordered
         * event allocation:
         *
         *   to_free list -> buffer1 (64K)
         *                   buffer2 (64K)
         *                   ...
         *
         * Each buffer keeps an array of ordered event objects:
         *    buffer -> event[0]
         *              event[1]
         *              ...
         *
         * Each allocated ordered event is linked to one of the
         * following lists:
         *   - time ordered list 'events'
         *   - list of currently removed events 'cache'
         *
         * Allocation of the ordered event uses the following order
         * to get the memory:
         *   - use recently removed object from 'cache' list
         *   - use available object in current allocation buffer
         *   - allocate new buffer if the current buffer is full
         *
         * Removal of an ordered event object moves it from 'events'
         * to the 'cache' list.
         */
        size = sizeof(*oe->buffer) + MAX_SAMPLE_BUFFER * sizeof(*new);

        if (!list_empty(cache)) {
                new = list_entry(cache->next, struct ordered_event, list);
                list_del(&new->list);
        } else if (oe->buffer) {
                new = &oe->buffer->event[oe->buffer_idx];
                if (++oe->buffer_idx == MAX_SAMPLE_BUFFER)
                        oe->buffer = NULL;
        } else if ((oe->cur_alloc_size + size) < oe->max_alloc_size) {
                oe->buffer = malloc(size);
                if (!oe->buffer) {
                        free_dup_event(oe, new_event);
                        return NULL;
                }

                pr("alloc size %" PRIu64 "B (+%zu), max %" PRIu64 "B\n",
                   oe->cur_alloc_size, size, oe->max_alloc_size);

                oe->cur_alloc_size += size;
                list_add(&oe->buffer->list, &oe->to_free);

                oe->buffer_idx = 1;
                new = &oe->buffer->event[0];
        } else {
                pr("allocation limit reached %" PRIu64 "B\n", oe->max_alloc_size);
                return NULL;
        }

        new->event = new_event;
        return new;
}

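/*
 * Allocate an ordered_event for the given event, stamp it with the
 * timestamp and link it into the time ordered queue.
 */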
static struct ordered_event *
ordered_events__new_event(struct ordered_events *oe, u64 timestamp,
                    union perf_event *event)
{
        struct ordered_event *new;

        new = alloc_event(oe, event);
        if (new) {
                new->timestamp = timestamp;
                queue_event(oe, new);
        }

        return new;
}

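/*
 * Unlink the event from the 'events' list, park the object on the
 * 'cache' list for reuse and drop the duplicated payload, if any.
 */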
void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event)
{
        list_move(&event->list, &oe->cache);
        oe->nr_events--;
        free_dup_event(oe, event->event);
        event->event = NULL;
}

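/*
 * Queue the event at the given timestamp. Timestamps of 0 or -1 are
 * rejected with -ETIME. If allocation fails, half of the queued events
 * are flushed out (OE_FLUSH__HALF) to reclaim memory and the
 * allocation is retried once.
 */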
int ordered_events__queue(struct ordered_events *oe, union perf_event *event,
                          u64 timestamp, u64 file_offset)
{
        struct ordered_event *oevent;

        if (!timestamp || timestamp == ~0ULL)
                return -ETIME;

        if (timestamp < oe->last_flush) {
                pr_oe_time(timestamp,      "out of order event\n");
                pr_oe_time(oe->last_flush, "last flush, last_flush_type %d\n",
                           oe->last_flush_type);

                oe->nr_unordered_events++;
        }

        oevent = ordered_events__new_event(oe, timestamp, event);
        if (!oevent) {
                ordered_events__flush(oe, OE_FLUSH__HALF);
                oevent = ordered_events__new_event(oe, timestamp, event);
        }

        if (!oevent)
                return -ENOMEM;

        oevent->file_offset = file_offset;
        return 0;
}

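/*
 * Deliver all queued events with a timestamp up to and including
 * oe->next_flush, moving each one to the cache as it goes out.
 */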
static int do_flush(struct ordered_events *oe, bool show_progress)
{
        struct list_head *head = &oe->events;
        struct ordered_event *tmp, *iter;
        u64 limit = oe->next_flush;
        u64 last_ts = oe->last ? oe->last->timestamp : 0ULL;
        struct ui_progress prog;
        int ret;

        if (!limit)
                return 0;

        if (show_progress)
                ui_progress__init(&prog, oe->nr_events, "Processing time ordered events...");

        list_for_each_entry_safe(iter, tmp, head, list) {
                if (session_done())
                        return 0;

                if (iter->timestamp > limit)
                        break;
                ret = oe->deliver(oe, iter);
                if (ret)
                        return ret;

                ordered_events__delete(oe, iter);
                oe->last_flush = iter->timestamp;

                if (show_progress)
                        ui_progress__update(&prog, 1);
        }

        if (list_empty(head))
                oe->last = NULL;
        else if (last_ts <= limit)
                oe->last = list_entry(head->prev, struct ordered_event, list);

        if (show_progress)
                ui_progress__finish();

        return 0;
}

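/*
 * Compute the flush limit 'next_flush' for the requested flush type,
 * then deliver everything queued up to that point:
 *   FINAL/TOP - flush everything (FINAL with a progress bar)
 *   HALF      - flush up to the midpoint between the first and last
 *               queued timestamps
 *   TIME      - flush up to the given timestamp
 *   ROUND     - keep the limit recorded at the end of the previous
 *               round (the max_timestamp seen at that time)
 */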
static int __ordered_events__flush(struct ordered_events *oe, enum oe_flush how,
                                   u64 timestamp)
{
        static const char * const str[] = {
                "NONE",
                "FINAL",
                "ROUND",
                "HALF ",
                "TOP  ",
                "TIME ",
        };
        int err;
        bool show_progress = false;

        if (oe->nr_events == 0)
                return 0;

        switch (how) {
        case OE_FLUSH__FINAL:
                show_progress = true;
                __fallthrough;
        case OE_FLUSH__TOP:
                oe->next_flush = ULLONG_MAX;
                break;

        case OE_FLUSH__HALF:
        {
                struct ordered_event *first, *last;
                struct list_head *head = &oe->events;

                first = list_entry(head->next, struct ordered_event, list);
                last = oe->last;

                /* Warn if we are called before any event got allocated. */
                if (WARN_ONCE(!last || list_empty(head), "empty queue"))
                        return 0;

                oe->next_flush  = first->timestamp;
                oe->next_flush += (last->timestamp - first->timestamp) / 2;
                break;
        }

        case OE_FLUSH__TIME:
                oe->next_flush = timestamp;
                show_progress = false;
                break;

        case OE_FLUSH__ROUND:
        case OE_FLUSH__NONE:
        default:
                break;
        }

        pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush PRE  %s, nr_events %u\n",
                   str[how], oe->nr_events);
        pr_oe_time(oe->max_timestamp, "max_timestamp\n");

        err = do_flush(oe, show_progress);

        if (!err) {
                if (how == OE_FLUSH__ROUND)
                        oe->next_flush = oe->max_timestamp;

                oe->last_flush_type = how;
        }

        pr_oe_time(oe->next_flush, "next_flush - ordered_events__flush POST %s, nr_events %u\n",
                   str[how], oe->nr_events);
        pr_oe_time(oe->last_flush, "last_flush\n");

        return err;
}

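/*
 * Entry points: flush according to the given strategy, or flush all
 * events up to an explicit timestamp.
 */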
int ordered_events__flush(struct ordered_events *oe, enum oe_flush how)
{
        return __ordered_events__flush(oe, how, 0);
}

int ordered_events__flush_time(struct ordered_events *oe, u64 timestamp)
{
        return __ordered_events__flush(oe, OE_FLUSH__TIME, timestamp);
}

u64 ordered_events__first_time(struct ordered_events *oe)
{
        struct ordered_event *event;

        if (list_empty(&oe->events))
                return 0;

        event = list_first_entry(&oe->events, struct ordered_event, list);
        return event->timestamp;
}

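/*
 * Initialize the queue and install the 'deliver' callback that is
 * invoked for each event as it is flushed out in timestamp order.
 */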
void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver,
                          void *data)
{
        INIT_LIST_HEAD(&oe->events);
        INIT_LIST_HEAD(&oe->cache);
        INIT_LIST_HEAD(&oe->to_free);
        oe->max_alloc_size = (u64) -1;
        oe->cur_alloc_size = 0;
        oe->deliver        = deliver;
        oe->data           = data;
}

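/*
 * Free one allocation buffer; 'max' bounds how many event slots were
 * actually used, so only the duplicated payloads of used slots are
 * freed.
 */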
static void
ordered_events_buffer__free(struct ordered_events_buffer *buffer,
                            unsigned int max, struct ordered_events *oe)
{
        if (oe->copy_on_queue) {
                unsigned int i;

                for (i = 0; i < max; i++)
                        __free_dup_event(oe, buffer->event[i].event);
        }

        free(buffer);
}

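/* Release all allocation buffers and any duplicated event payloads. */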
void ordered_events__free(struct ordered_events *oe)
{
        struct ordered_events_buffer *buffer, *tmp;

        if (list_empty(&oe->to_free))
                return;

        /*
         * The current buffer might not have all its events allocated
         * yet, so free only the allocated ones ...
         */
        if (oe->buffer) {
                list_del(&oe->buffer->list);
                ordered_events_buffer__free(oe->buffer, oe->buffer_idx, oe);
        }

        /* ... and continue with the rest */
        list_for_each_entry_safe(buffer, tmp, &oe->to_free, list) {
                list_del(&buffer->list);
                ordered_events_buffer__free(buffer, MAX_SAMPLE_BUFFER, oe);
        }
}

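/*
 * Reset the queue to a pristine state, preserving the caller's deliver
 * callback and private data pointer across the memset.
 */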
void ordered_events__reinit(struct ordered_events *oe)
{
        ordered_events__deliver_t old_deliver = oe->deliver;
        void *old_data = oe->data;

        ordered_events__free(oe);
        memset(oe, '\0', sizeof(*oe));
        ordered_events__init(oe, old_deliver, old_data);
}