linux/tools/perf/util/cs-etm.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015-2018 Linaro Limited.
 *
 * Author: Tor Jeremiassen <tor@ti.com>
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/types.h>
#include <linux/zalloc.h>

#include <opencsd/ocsd_if_types.h>
#include <stdlib.h>

#include "auxtrace.h"
#include "color.h"
#include "cs-etm.h"
#include "cs-etm-decoder/cs-etm-decoder.h"
#include "debug.h"
#include "dso.h"
#include "evlist.h"
#include "intlist.h"
#include "machine.h"
#include "map.h"
#include "perf.h"
#include "session.h"
#include "map_symbol.h"
#include "branch.h"
#include "symbol.h"
#include "tool.h"
#include "thread.h"
#include "thread-stack.h"
#include <tools/libc_compat.h>
#include "util/synthetic-events.h"

#define MAX_TIMESTAMP (~0ULL)

struct cs_etm_auxtrace {
        struct auxtrace auxtrace;
        struct auxtrace_queues queues;
        struct auxtrace_heap heap;
        struct itrace_synth_opts synth_opts;
        struct perf_session *session;
        struct machine *machine;
        struct thread *unknown_thread;

        u8 timeless_decoding;
        u8 snapshot_mode;
        u8 data_queued;
        u8 sample_branches;
        u8 sample_instructions;

        int num_cpu;
        u32 auxtrace_type;
        u64 branches_sample_type;
        u64 branches_id;
        u64 instructions_sample_type;
        u64 instructions_sample_period;
        u64 instructions_id;
        u64 **metadata;
        u64 kernel_start;
        unsigned int pmu_type;
};

struct cs_etm_traceid_queue {
        u8 trace_chan_id;
        pid_t pid, tid;
        u64 period_instructions;
        size_t last_branch_pos;
        union perf_event *event_buf;
        struct thread *thread;
        struct branch_stack *last_branch;
        struct branch_stack *last_branch_rb;
        struct cs_etm_packet *prev_packet;
        struct cs_etm_packet *packet;
        struct cs_etm_packet_queue packet_queue;
};

struct cs_etm_queue {
        struct cs_etm_auxtrace *etm;
        struct cs_etm_decoder *decoder;
        struct auxtrace_buffer *buffer;
        unsigned int queue_nr;
        u8 pending_timestamp;
        u64 offset;
        const unsigned char *buf;
        size_t buf_len, buf_used;
        /* Conversion between traceID and index in traceid_queues array */
        struct intlist *traceid_queues_list;
        struct cs_etm_traceid_queue **traceid_queues;
};

/* RB tree for quick conversion between traceID and metadata pointers */
static struct intlist *traceid_list;

static int cs_etm__update_queues(struct cs_etm_auxtrace *etm);
static int cs_etm__process_queues(struct cs_etm_auxtrace *etm);
static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
                                           pid_t tid);
static int cs_etm__get_data_block(struct cs_etm_queue *etmq);
static int cs_etm__decode_data_block(struct cs_etm_queue *etmq);
/* PTM's ETMIDR [11:8] is set to b0011 */
#define ETMIDR_PTM_VERSION 0x00000300

/*
 * A struct auxtrace_heap_item only has a queue_nr and a timestamp to
 * work with.  One option is to modify the auxtrace_heap_XYZ() API or simply
 * encode the etm queue number as the upper 16 bits and the channel as
 * the lower 16 bits.
 */
#define TO_CS_QUEUE_NR(queue_nr, trace_chan_id) \
                      ((queue_nr) << 16 | (trace_chan_id))
#define TO_QUEUE_NR(cs_queue_nr) ((cs_queue_nr) >> 16)
#define TO_TRACE_CHAN_ID(cs_queue_nr) ((cs_queue_nr) & 0x0000ffff)
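
/*
 * For example (illustrative values): queue_nr 2 with trace_chan_id 0x10
 * packs to cs_queue_nr 0x20010; TO_QUEUE_NR() and TO_TRACE_CHAN_ID()
 * recover the two halves.
 */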

static u32 cs_etm__get_v7_protocol_version(u32 etmidr)
{
        etmidr &= ETMIDR_PTM_VERSION;

        if (etmidr == ETMIDR_PTM_VERSION)
                return CS_ETM_PROTO_PTM;

        return CS_ETM_PROTO_ETMV3;
}

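/*
 * Retrieve the metadata magic number (trace protocol discriminator) for
 * the CPU that owns @trace_chan_id, using the global traceid_list.
 */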
static int cs_etm__get_magic(u8 trace_chan_id, u64 *magic)
{
        struct int_node *inode;
        u64 *metadata;

        inode = intlist__find(traceid_list, trace_chan_id);
        if (!inode)
                return -EINVAL;

        metadata = inode->priv;
        *magic = metadata[CS_ETM_MAGIC];
        return 0;
}

int cs_etm__get_cpu(u8 trace_chan_id, int *cpu)
{
        struct int_node *inode;
        u64 *metadata;

        inode = intlist__find(traceid_list, trace_chan_id);
        if (!inode)
                return -EINVAL;

        metadata = inode->priv;
        *cpu = (int)metadata[CS_ETM_CPU];
        return 0;
}

void cs_etm__etmq_set_traceid_queue_timestamp(struct cs_etm_queue *etmq,
                                              u8 trace_chan_id)
{
        /*
         * When a timestamp packet is encountered the backend code
         * is stopped so that the front end has time to process packets
         * that were accumulated in the traceID queue.  Since there can
         * be more than one channel per cs_etm_queue, we need to specify
         * what traceID queue needs servicing.
         */
        etmq->pending_timestamp = trace_chan_id;
}

static u64 cs_etm__etmq_get_timestamp(struct cs_etm_queue *etmq,
                                      u8 *trace_chan_id)
{
        struct cs_etm_packet_queue *packet_queue;

        if (!etmq->pending_timestamp)
                return 0;

        if (trace_chan_id)
                *trace_chan_id = etmq->pending_timestamp;

        packet_queue = cs_etm__etmq_get_packet_queue(etmq,
                                                     etmq->pending_timestamp);
        if (!packet_queue)
                return 0;

        /* Acknowledge pending status */
        etmq->pending_timestamp = 0;

        /* See function cs_etm_decoder__do_{hard|soft}_timestamp() */
        return packet_queue->timestamp;
}

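/*
 * Reset a packet queue to its pristine state: empty ring indices and
 * every buffered packet marked with invalid addresses and IDs.
 */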
static void cs_etm__clear_packet_queue(struct cs_etm_packet_queue *queue)
{
        int i;

        queue->head = 0;
        queue->tail = 0;
        queue->packet_count = 0;
        for (i = 0; i < CS_ETM_PACKET_MAX_BUFFER; i++) {
                queue->packet_buffer[i].isa = CS_ETM_ISA_UNKNOWN;
                queue->packet_buffer[i].start_addr = CS_ETM_INVAL_ADDR;
                queue->packet_buffer[i].end_addr = CS_ETM_INVAL_ADDR;
                queue->packet_buffer[i].instr_count = 0;
                queue->packet_buffer[i].last_instr_taken_branch = false;
                queue->packet_buffer[i].last_instr_size = 0;
                queue->packet_buffer[i].last_instr_type = 0;
                queue->packet_buffer[i].last_instr_subtype = 0;
                queue->packet_buffer[i].last_instr_cond = 0;
                queue->packet_buffer[i].flags = 0;
                queue->packet_buffer[i].exception_number = UINT32_MAX;
                queue->packet_buffer[i].trace_chan_id = UINT8_MAX;
                queue->packet_buffer[i].cpu = INT_MIN;
        }
}

static void cs_etm__clear_all_packet_queues(struct cs_etm_queue *etmq)
{
        int idx;
        struct int_node *inode;
        struct cs_etm_traceid_queue *tidq;
        struct intlist *traceid_queues_list = etmq->traceid_queues_list;

        intlist__for_each_entry(inode, traceid_queues_list) {
                idx = (int)(intptr_t)inode->priv;
                tidq = etmq->traceid_queues[idx];
                cs_etm__clear_packet_queue(&tidq->packet_queue);
        }
}

static int cs_etm__init_traceid_queue(struct cs_etm_queue *etmq,
                                      struct cs_etm_traceid_queue *tidq,
                                      u8 trace_chan_id)
{
        int rc = -ENOMEM;
        struct auxtrace_queue *queue;
        struct cs_etm_auxtrace *etm = etmq->etm;

        cs_etm__clear_packet_queue(&tidq->packet_queue);

        queue = &etmq->etm->queues.queue_array[etmq->queue_nr];
        tidq->tid = queue->tid;
        tidq->pid = -1;
        tidq->trace_chan_id = trace_chan_id;

        tidq->packet = zalloc(sizeof(struct cs_etm_packet));
        if (!tidq->packet)
                goto out;

        tidq->prev_packet = zalloc(sizeof(struct cs_etm_packet));
        if (!tidq->prev_packet)
                goto out_free;

        if (etm->synth_opts.last_branch) {
                size_t sz = sizeof(struct branch_stack);

                sz += etm->synth_opts.last_branch_sz *
                      sizeof(struct branch_entry);
                tidq->last_branch = zalloc(sz);
                if (!tidq->last_branch)
                        goto out_free;
                tidq->last_branch_rb = zalloc(sz);
                if (!tidq->last_branch_rb)
                        goto out_free;
        }

        tidq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
        if (!tidq->event_buf)
                goto out_free;

        return 0;

out_free:
        zfree(&tidq->last_branch_rb);
        zfree(&tidq->last_branch);
        zfree(&tidq->prev_packet);
        zfree(&tidq->packet);
out:
        return rc;
}

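/*
 * Find the traceid_queue associated with @trace_chan_id, allocating and
 * registering a new one on first sight.  In timeless (per-thread) mode
 * all trace is folded onto a single queue, CS_ETM_PER_THREAD_TRACEID.
 */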
static struct cs_etm_traceid_queue
*cs_etm__etmq_get_traceid_queue(struct cs_etm_queue *etmq, u8 trace_chan_id)
{
        int idx;
        struct int_node *inode;
        struct intlist *traceid_queues_list;
        struct cs_etm_traceid_queue *tidq, **traceid_queues;
        struct cs_etm_auxtrace *etm = etmq->etm;

        if (etm->timeless_decoding)
                trace_chan_id = CS_ETM_PER_THREAD_TRACEID;

        traceid_queues_list = etmq->traceid_queues_list;

        /*
         * Check if a traceid_queue exists for this traceID by looking
         * in the queue list.
         */
        inode = intlist__find(traceid_queues_list, trace_chan_id);
        if (inode) {
                idx = (int)(intptr_t)inode->priv;
                return etmq->traceid_queues[idx];
        }

        /* We couldn't find a traceid_queue for this traceID, allocate one */
        tidq = malloc(sizeof(*tidq));
        if (!tidq)
                return NULL;

        memset(tidq, 0, sizeof(*tidq));

        /* Get a valid index for the new traceid_queue */
        idx = intlist__nr_entries(traceid_queues_list);
        /* Memory for the inode is freed in cs_etm__free_traceid_queues() */
        inode = intlist__findnew(traceid_queues_list, trace_chan_id);
        if (!inode)
                goto out_free;

        /* Associate this traceID with this index */
        inode->priv = (void *)(intptr_t)idx;

        if (cs_etm__init_traceid_queue(etmq, tidq, trace_chan_id))
                goto out_free;

        /* Grow the traceid_queues array by one unit */
        traceid_queues = etmq->traceid_queues;
        traceid_queues = reallocarray(traceid_queues,
                                      idx + 1,
                                      sizeof(*traceid_queues));

        /*
         * On failure reallocarray() returns NULL and the original block of
         * memory is left untouched.
         */
        if (!traceid_queues)
                goto out_free;

        traceid_queues[idx] = tidq;
        etmq->traceid_queues = traceid_queues;

        return etmq->traceid_queues[idx];

out_free:
        /*
         * Function intlist__remove() removes the inode from the list
         * and deletes the memory associated with it.
         */
        intlist__remove(traceid_queues_list, inode);
        free(tidq);

        return NULL;
}

struct cs_etm_packet_queue
*cs_etm__etmq_get_packet_queue(struct cs_etm_queue *etmq, u8 trace_chan_id)
{
        struct cs_etm_traceid_queue *tidq;

        tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
        if (tidq)
                return &tidq->packet_queue;

        return NULL;
}

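/*
 * Swap the PACKET and PREV_PACKET pointers rather than copying them:
 * after a packet has been consumed it becomes the "previous" packet for
 * the next one.  Only needed when samples are being synthesized.
 */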
static void cs_etm__packet_swap(struct cs_etm_auxtrace *etm,
                                struct cs_etm_traceid_queue *tidq)
{
        struct cs_etm_packet *tmp;

        if (etm->sample_branches || etm->synth_opts.last_branch ||
            etm->sample_instructions) {
                /*
                 * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for
                 * the next incoming packet.
                 */
                tmp = tidq->packet;
                tidq->packet = tidq->prev_packet;
                tidq->prev_packet = tmp;
        }
}

static void cs_etm__packet_dump(const char *pkt_string)
{
        const char *color = PERF_COLOR_BLUE;
        int len = strlen(pkt_string);

        if (len && (pkt_string[len-1] == '\n'))
                color_fprintf(stdout, color, "  %s", pkt_string);
        else
                color_fprintf(stdout, color, "  %s\n", pkt_string);

        fflush(stdout);
}

static void cs_etm__set_trace_param_etmv3(struct cs_etm_trace_params *t_params,
                                          struct cs_etm_auxtrace *etm, int idx,
                                          u32 etmidr)
{
        u64 **metadata = etm->metadata;

        t_params[idx].protocol = cs_etm__get_v7_protocol_version(etmidr);
        t_params[idx].etmv3.reg_ctrl = metadata[idx][CS_ETM_ETMCR];
        t_params[idx].etmv3.reg_trc_id = metadata[idx][CS_ETM_ETMTRACEIDR];
}

static void cs_etm__set_trace_param_etmv4(struct cs_etm_trace_params *t_params,
                                          struct cs_etm_auxtrace *etm, int idx)
{
        u64 **metadata = etm->metadata;

        t_params[idx].protocol = CS_ETM_PROTO_ETMV4i;
        t_params[idx].etmv4.reg_idr0 = metadata[idx][CS_ETMV4_TRCIDR0];
        t_params[idx].etmv4.reg_idr1 = metadata[idx][CS_ETMV4_TRCIDR1];
        t_params[idx].etmv4.reg_idr2 = metadata[idx][CS_ETMV4_TRCIDR2];
        t_params[idx].etmv4.reg_idr8 = metadata[idx][CS_ETMV4_TRCIDR8];
        t_params[idx].etmv4.reg_configr = metadata[idx][CS_ETMV4_TRCCONFIGR];
        t_params[idx].etmv4.reg_traceidr = metadata[idx][CS_ETMV4_TRCTRACEIDR];
}

static int cs_etm__init_trace_params(struct cs_etm_trace_params *t_params,
                                     struct cs_etm_auxtrace *etm)
{
        int i;
        u32 etmidr;
        u64 architecture;

        for (i = 0; i < etm->num_cpu; i++) {
                architecture = etm->metadata[i][CS_ETM_MAGIC];

                switch (architecture) {
                case __perf_cs_etmv3_magic:
                        etmidr = etm->metadata[i][CS_ETM_ETMIDR];
                        cs_etm__set_trace_param_etmv3(t_params, etm, i, etmidr);
                        break;
                case __perf_cs_etmv4_magic:
                        cs_etm__set_trace_param_etmv4(t_params, etm, i);
                        break;
                default:
                        return -EINVAL;
                }
        }

        return 0;
}

static int cs_etm__init_decoder_params(struct cs_etm_decoder_params *d_params,
                                       struct cs_etm_queue *etmq,
                                       enum cs_etm_decoder_operation mode)
{
        int ret = -EINVAL;

        if (!(mode < CS_ETM_OPERATION_MAX))
                goto out;

        d_params->packet_printer = cs_etm__packet_dump;
        d_params->operation = mode;
        d_params->data = etmq;
        d_params->formatted = true;
        d_params->fsyncs = false;
        d_params->hsyncs = false;
        d_params->frame_aligned = true;

        ret = 0;
out:
        return ret;
}

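/*
 * Pretty-print a raw trace buffer: build a throw-away decoder in PRINT
 * mode and run it over the whole buffer so every packet is dumped as
 * text instead of being decoded into samples.
 */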
static void cs_etm__dump_event(struct cs_etm_auxtrace *etm,
                               struct auxtrace_buffer *buffer)
{
        int ret;
        const char *color = PERF_COLOR_BLUE;
        struct cs_etm_decoder_params d_params;
        struct cs_etm_trace_params *t_params;
        struct cs_etm_decoder *decoder;
        size_t buffer_used = 0;

        fprintf(stdout, "\n");
        color_fprintf(stdout, color,
                     ". ... CoreSight ETM Trace data: size %zu bytes\n",
                     buffer->size);

        /* Use metadata to fill in trace parameters for trace decoder */
        t_params = zalloc(sizeof(*t_params) * etm->num_cpu);

        if (!t_params)
                return;

        if (cs_etm__init_trace_params(t_params, etm))
                goto out_free;

        /* Set decoder parameters to simply print the trace packets */
        if (cs_etm__init_decoder_params(&d_params, NULL,
                                        CS_ETM_OPERATION_PRINT))
                goto out_free;

        decoder = cs_etm_decoder__new(etm->num_cpu, &d_params, t_params);

        if (!decoder)
                goto out_free;
        do {
                size_t consumed;

                ret = cs_etm_decoder__process_data_block(
                                decoder, buffer->offset,
                                &((u8 *)buffer->data)[buffer_used],
                                buffer->size - buffer_used, &consumed);
                if (ret)
                        break;

                buffer_used += consumed;
        } while (buffer_used < buffer->size);

        cs_etm_decoder__free(decoder);

out_free:
        zfree(&t_params);
}

static int cs_etm__flush_events(struct perf_session *session,
                                struct perf_tool *tool)
{
        int ret;
        struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
                                                   struct cs_etm_auxtrace,
                                                   auxtrace);
        if (dump_trace)
                return 0;

        if (!tool->ordered_events)
                return -EINVAL;

        ret = cs_etm__update_queues(etm);

        if (ret < 0)
                return ret;

        if (etm->timeless_decoding)
                return cs_etm__process_timeless_queues(etm, -1);

        return cs_etm__process_queues(etm);
}

static void cs_etm__free_traceid_queues(struct cs_etm_queue *etmq)
{
        int idx;
        uintptr_t priv;
        struct int_node *inode, *tmp;
        struct cs_etm_traceid_queue *tidq;
        struct intlist *traceid_queues_list = etmq->traceid_queues_list;

        intlist__for_each_entry_safe(inode, tmp, traceid_queues_list) {
                priv = (uintptr_t)inode->priv;
                idx = priv;

                /* Free this traceid_queue from the array */
                tidq = etmq->traceid_queues[idx];
                thread__zput(tidq->thread);
                zfree(&tidq->event_buf);
                zfree(&tidq->last_branch);
                zfree(&tidq->last_branch_rb);
                zfree(&tidq->prev_packet);
                zfree(&tidq->packet);
                zfree(&tidq);

                /*
                 * Function intlist__remove() removes the inode from the list
                 * and deletes the memory associated with it.
                 */
                intlist__remove(traceid_queues_list, inode);
        }

        /* Then the RB tree itself */
        intlist__delete(traceid_queues_list);
        etmq->traceid_queues_list = NULL;

        /* Finally free the traceid_queues array */
        zfree(&etmq->traceid_queues);
}

static void cs_etm__free_queue(void *priv)
{
        struct cs_etm_queue *etmq = priv;

        if (!etmq)
                return;

        cs_etm_decoder__free(etmq->decoder);
        cs_etm__free_traceid_queues(etmq);
        free(etmq);
}

static void cs_etm__free_events(struct perf_session *session)
{
        unsigned int i;
        struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
                                                   struct cs_etm_auxtrace,
                                                   auxtrace);
        struct auxtrace_queues *queues = &aux->queues;

        for (i = 0; i < queues->nr_queues; i++) {
                cs_etm__free_queue(queues->queue_array[i].priv);
                queues->queue_array[i].priv = NULL;
        }

        auxtrace_queues__free(queues);
}

static void cs_etm__free(struct perf_session *session)
{
        int i;
        struct int_node *inode, *tmp;
        struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
                                                   struct cs_etm_auxtrace,
                                                   auxtrace);
        cs_etm__free_events(session);
        session->auxtrace = NULL;

        /* First remove all traceID/metadata nodes from the RB tree */
        intlist__for_each_entry_safe(inode, tmp, traceid_list)
                intlist__remove(traceid_list, inode);
        /* Then the RB tree itself */
        intlist__delete(traceid_list);

        for (i = 0; i < aux->num_cpu; i++)
                zfree(&aux->metadata[i]);

        thread__zput(aux->unknown_thread);
        zfree(&aux->metadata);
        zfree(&aux);
}

static bool cs_etm__evsel_is_auxtrace(struct perf_session *session,
                                      struct evsel *evsel)
{
        struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
                                                   struct cs_etm_auxtrace,
                                                   auxtrace);

        return evsel->core.attr.type == aux->pmu_type;
}

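/*
 * Classify an address into a perf cpumode.  Anything at or above
 * kernel_start is kernel space; the host/guest distinction comes from
 * the machine the trace was recorded on.
 */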
static u8 cs_etm__cpu_mode(struct cs_etm_queue *etmq, u64 address)
{
        struct machine *machine;

        machine = etmq->etm->machine;

        if (address >= etmq->etm->kernel_start) {
                if (machine__is_host(machine))
                        return PERF_RECORD_MISC_KERNEL;
                else
                        return PERF_RECORD_MISC_GUEST_KERNEL;
        } else {
                if (machine__is_host(machine))
                        return PERF_RECORD_MISC_USER;
                else if (perf_guest)
                        return PERF_RECORD_MISC_GUEST_USER;
                else
                        return PERF_RECORD_MISC_HYPERVISOR;
        }
}

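/*
 * Memory-access callback handed to the decode library: read @size bytes
 * of the traced program's memory at @address by resolving the address to
 * a map/DSO for the thread owning @trace_chan_id and reading from the
 * backing file.  Returns the number of bytes read, or 0 on any failure.
 */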
static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u8 trace_chan_id,
                              u64 address, size_t size, u8 *buffer)
{
        u8  cpumode;
        u64 offset;
        int len;
        struct thread *thread;
        struct machine *machine;
        struct addr_location al;
        struct cs_etm_traceid_queue *tidq;

        if (!etmq)
                return 0;

        machine = etmq->etm->machine;
        cpumode = cs_etm__cpu_mode(etmq, address);
        tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
        if (!tidq)
                return 0;

        thread = tidq->thread;
        if (!thread) {
                if (cpumode != PERF_RECORD_MISC_KERNEL)
                        return 0;
                thread = etmq->etm->unknown_thread;
        }

        if (!thread__find_map(thread, cpumode, address, &al) || !al.map->dso)
                return 0;

        if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
            dso__data_status_seen(al.map->dso, DSO_DATA_STATUS_SEEN_ITRACE))
                return 0;

        offset = al.map->map_ip(al.map, address);

        map__load(al.map);

        len = dso__data_read_offset(al.map->dso, machine, offset, buffer, size);

        if (len <= 0)
                return 0;

        return len;
}

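/*
 * Allocate a cs_etm_queue and its decoder, deriving the per-CPU trace
 * parameters from the recorded metadata and registering the memory
 * access callback the decode library needs to follow the program image.
 */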
static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm)
{
        struct cs_etm_decoder_params d_params;
        struct cs_etm_trace_params  *t_params = NULL;
        struct cs_etm_queue *etmq;

        etmq = zalloc(sizeof(*etmq));
        if (!etmq)
                return NULL;

        etmq->traceid_queues_list = intlist__new(NULL);
        if (!etmq->traceid_queues_list)
                goto out_free;

        /* Use metadata to fill in trace parameters for trace decoder */
        t_params = zalloc(sizeof(*t_params) * etm->num_cpu);

        if (!t_params)
                goto out_free;

        if (cs_etm__init_trace_params(t_params, etm))
                goto out_free;

        /* Set decoder parameters to decode trace packets */
        if (cs_etm__init_decoder_params(&d_params, etmq,
                                        CS_ETM_OPERATION_DECODE))
                goto out_free;

        etmq->decoder = cs_etm_decoder__new(etm->num_cpu, &d_params, t_params);

        if (!etmq->decoder)
                goto out_free;

        /*
         * Register a function to handle all memory accesses required by
         * the trace decoder library.
         */
        if (cs_etm_decoder__add_mem_access_cb(etmq->decoder,
                                              0x0L, ((u64) -1L),
                                              cs_etm__mem_access))
                goto out_free_decoder;

        zfree(&t_params);
        return etmq;

out_free_decoder:
        cs_etm_decoder__free(etmq->decoder);
out_free:
        intlist__delete(etmq->traceid_queues_list);
        free(etmq);

        return NULL;
}

static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
                               struct auxtrace_queue *queue,
                               unsigned int queue_nr)
{
        int ret = 0;
        unsigned int cs_queue_nr;
        u8 trace_chan_id;
        u64 timestamp;
        struct cs_etm_queue *etmq = queue->priv;

        if (list_empty(&queue->head) || etmq)
                goto out;

        etmq = cs_etm__alloc_queue(etm);

        if (!etmq) {
                ret = -ENOMEM;
                goto out;
        }

        queue->priv = etmq;
        etmq->etm = etm;
        etmq->queue_nr = queue_nr;
        etmq->offset = 0;

        if (etm->timeless_decoding)
                goto out;

        /*
         * We are under a CPU-wide trace scenario.  As such we need to know
         * when the code that generated the traces started to execute so that
         * it can be correlated with execution on other CPUs.  So we get a
         * handle on the beginning of traces and decode until we find a
         * timestamp.  The timestamp is then added to the auxtrace min heap
         * in order to know which queue (of all the etmqs) to decode first.
         */
        while (1) {
                /*
                 * Fetch an aux_buffer from this etmq.  Bail if no more
                 * blocks or an error has been encountered.
                 */
                ret = cs_etm__get_data_block(etmq);
                if (ret <= 0)
                        goto out;

                /*
                 * Run decoder on the trace block.  The decoder will stop when
                 * encountering a timestamp, a full packet queue or the end of
                 * trace for that block.
                 */
                ret = cs_etm__decode_data_block(etmq);
                if (ret)
                        goto out;

                /*
                 * Function cs_etm_decoder__do_{hard|soft}_timestamp() does all
                 * the timestamp calculation for us.
                 */
                timestamp = cs_etm__etmq_get_timestamp(etmq, &trace_chan_id);

                /* We found a timestamp, no need to continue. */
                if (timestamp)
                        break;

                /*
                 * We didn't find a timestamp so empty all the traceid packet
                 * queues before looking for another timestamp packet, either
                 * in the current data block or a new one.  Packets that were
                 * just decoded are useless since no timestamp has been
                 * associated with them.  As such simply discard them.
                 */
                cs_etm__clear_all_packet_queues(etmq);
        }

        /*
         * We have a timestamp.  Add it to the min heap to reflect when
         * instructions conveyed by the range packets of this traceID queue
         * started to execute.  Once the same has been done for all the traceID
         * queues of each etmq, rendering and decoding can start in
         * chronological order.
         *
         * Note that packets decoded above are still in the traceID's packet
         * queue and will be processed in cs_etm__process_queues().
         */
        cs_queue_nr = TO_CS_QUEUE_NR(queue_nr, trace_chan_id);
        ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, timestamp);
out:
        return ret;
}

static int cs_etm__setup_queues(struct cs_etm_auxtrace *etm)
{
        unsigned int i;
        int ret;

        if (!etm->kernel_start)
                etm->kernel_start = machine__kernel_start(etm->machine);

        for (i = 0; i < etm->queues.nr_queues; i++) {
                ret = cs_etm__setup_queue(etm, &etm->queues.queue_array[i], i);
                if (ret)
                        return ret;
        }

        return 0;
}

static int cs_etm__update_queues(struct cs_etm_auxtrace *etm)
{
        if (etm->queues.new_data) {
                etm->queues.new_data = false;
                return cs_etm__setup_queues(etm);
        }

        return 0;
}

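/*
 * Linearize the last-branch ring buffer into the flat branch_stack that
 * gets attached to samples.  Newest branch first: copy from the most
 * recent insertion point to the end of the ring, then (if the ring has
 * wrapped) the remainder from the start of the ring.
 */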
static inline
void cs_etm__copy_last_branch_rb(struct cs_etm_queue *etmq,
                                 struct cs_etm_traceid_queue *tidq)
{
        struct branch_stack *bs_src = tidq->last_branch_rb;
        struct branch_stack *bs_dst = tidq->last_branch;
        size_t nr = 0;

        /*
         * Set the number of records before early exit: ->nr is used to
         * determine how many branches to copy from ->entries.
         */
        bs_dst->nr = bs_src->nr;

        /*
         * Early exit when there is nothing to copy.
         */
        if (!bs_src->nr)
                return;

        /*
         * As bs_src->entries is a circular buffer, we need to copy from it in
         * two steps.  First, copy the branches from the most recently inserted
         * branch ->last_branch_pos until the end of bs_src->entries buffer.
         */
        nr = etmq->etm->synth_opts.last_branch_sz - tidq->last_branch_pos;
        memcpy(&bs_dst->entries[0],
               &bs_src->entries[tidq->last_branch_pos],
               sizeof(struct branch_entry) * nr);

        /*
         * If we wrapped around at least once, the branches from the beginning
         * of the bs_src->entries buffer and until the ->last_branch_pos element
         * are older valid branches: copy them over.  The total number of
         * branches copied over will be equal to the number of branches
         * requested by the user in last_branch_sz.
         */
        if (bs_src->nr >= etmq->etm->synth_opts.last_branch_sz) {
                memcpy(&bs_dst->entries[nr],
                       &bs_src->entries[0],
                       sizeof(struct branch_entry) * tidq->last_branch_pos);
        }
}

static inline
void cs_etm__reset_last_branch_rb(struct cs_etm_traceid_queue *tidq)
{
        tidq->last_branch_pos = 0;
        tidq->last_branch_rb->nr = 0;
}

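/*
 * Size a T32 (Thumb) instruction by fetching its first halfword from
 * target memory: bits [15:11] equal to 0b11101, 0b11110 or 0b11111 mark
 * a 32-bit encoding, anything else is 16-bit.
 */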
static inline int cs_etm__t32_instr_size(struct cs_etm_queue *etmq,
                                         u8 trace_chan_id, u64 addr)
{
        u8 instrBytes[2];

        cs_etm__mem_access(etmq, trace_chan_id, addr,
                           ARRAY_SIZE(instrBytes), instrBytes);
        /*
         * T32 instruction size is indicated by bits[15:11] of the first
         * 16-bit word of the instruction: 0b11101, 0b11110 and 0b11111
         * denote a 32-bit instruction.
         */
        return ((instrBytes[1] & 0xF8) >= 0xE8) ? 4 : 2;
}

static inline u64 cs_etm__first_executed_instr(struct cs_etm_packet *packet)
{
        /* Returns 0 for the CS_ETM_DISCONTINUITY packet */
        if (packet->sample_type == CS_ETM_DISCONTINUITY)
                return 0;

        return packet->start_addr;
}

static inline
u64 cs_etm__last_executed_instr(const struct cs_etm_packet *packet)
{
        /* Returns 0 for the CS_ETM_DISCONTINUITY packet */
        if (packet->sample_type == CS_ETM_DISCONTINUITY)
                return 0;

        return packet->end_addr - packet->last_instr_size;
}

static inline u64 cs_etm__instr_addr(struct cs_etm_queue *etmq,
                                     u64 trace_chan_id,
                                     const struct cs_etm_packet *packet,
                                     u64 offset)
{
        if (packet->isa == CS_ETM_ISA_T32) {
                u64 addr = packet->start_addr;

                while (offset) {
                        addr += cs_etm__t32_instr_size(etmq,
                                                       trace_chan_id, addr);
                        offset--;
                }
                return addr;
        }

        /* Assume a 4 byte instruction size (A32/A64) */
        return packet->start_addr + offset * 4;
}

static void cs_etm__update_last_branch_rb(struct cs_etm_queue *etmq,
                                          struct cs_etm_traceid_queue *tidq)
{
        struct branch_stack *bs = tidq->last_branch_rb;
        struct branch_entry *be;

        /*
         * The branches are recorded in a circular buffer in reverse
         * chronological order: we start recording from the last element of the
         * buffer down.  After writing the first element of the stack, move the
         * insert position back to the end of the buffer.
         */
        if (!tidq->last_branch_pos)
                tidq->last_branch_pos = etmq->etm->synth_opts.last_branch_sz;

        tidq->last_branch_pos -= 1;

        be       = &bs->entries[tidq->last_branch_pos];
        be->from = cs_etm__last_executed_instr(tidq->prev_packet);
        be->to   = cs_etm__first_executed_instr(tidq->packet);
        /* No support for mispredict */
        be->flags.mispred = 0;
        be->flags.predicted = 1;

        /*
         * Increment bs->nr until reaching the number of last branches
         * requested by the user on the command line.
         */
        if (bs->nr < etmq->etm->synth_opts.last_branch_sz)
                bs->nr += 1;
}

static int cs_etm__inject_event(union perf_event *event,
                               struct perf_sample *sample, u64 type)
{
        event->header.size = perf_event__sample_event_size(sample, type, 0);
        return perf_event__synthesize_sample(event, type, 0, sample);
}

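/*
 * Step this queue to its next auxtrace buffer, faulting the data in from
 * the perf.data file when needed and dropping the previous buffer's
 * payload.  Returns the new buffer length, 0 when the queue is drained,
 * or a negative error code.
 */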
static int
cs_etm__get_trace(struct cs_etm_queue *etmq)
{
        struct auxtrace_buffer *aux_buffer = etmq->buffer;
        struct auxtrace_buffer *old_buffer = aux_buffer;
        struct auxtrace_queue *queue;

        queue = &etmq->etm->queues.queue_array[etmq->queue_nr];

        aux_buffer = auxtrace_buffer__next(queue, aux_buffer);

        /* If no more data, drop the previous auxtrace_buffer and return */
        if (!aux_buffer) {
                if (old_buffer)
                        auxtrace_buffer__drop_data(old_buffer);
                etmq->buf_len = 0;
                return 0;
        }

        etmq->buffer = aux_buffer;

        /* If the aux_buffer doesn't have data associated, try to load it */
        if (!aux_buffer->data) {
                /* get the file desc associated with the perf data file */
                int fd = perf_data__fd(etmq->etm->session->data);

                aux_buffer->data = auxtrace_buffer__get_data(aux_buffer, fd);
                if (!aux_buffer->data)
                        return -ENOMEM;
        }

        /* If valid, drop the previous buffer */
        if (old_buffer)
                auxtrace_buffer__drop_data(old_buffer);

        etmq->buf_used = 0;
        etmq->buf_len = aux_buffer->size;
        etmq->buf = aux_buffer->data;

        return etmq->buf_len;
}

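/*
 * Late thread binding: the tid is known when the queue is set up but the
 * struct thread may not exist yet.  Look it up in the machine's thread
 * tree on demand and cache the pid once the thread is resolved.
 */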
static void cs_etm__set_pid_tid_cpu(struct cs_etm_auxtrace *etm,
                                    struct cs_etm_traceid_queue *tidq)
{
        if ((!tidq->thread) && (tidq->tid != -1))
                tidq->thread = machine__find_thread(etm->machine, -1,
                                                    tidq->tid);

        if (tidq->thread)
                tidq->pid = tidq->thread->pid_;
}

int cs_etm__etmq_set_tid(struct cs_etm_queue *etmq,
                         pid_t tid, u8 trace_chan_id)
{
        int cpu, err = -EINVAL;
        struct cs_etm_auxtrace *etm = etmq->etm;
        struct cs_etm_traceid_queue *tidq;

        tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
        if (!tidq)
                return err;

        if (cs_etm__get_cpu(trace_chan_id, &cpu) < 0)
                return err;

        err = machine__set_current_tid(etm->machine, cpu, tid, tid);
        if (err)
                return err;

        tidq->tid = tid;
        thread__zput(tidq->thread);

        cs_etm__set_pid_tid_cpu(etm, tidq);
        return 0;
}

bool cs_etm__etmq_is_timeless(struct cs_etm_queue *etmq)
{
        return !!etmq->etm->timeless_decoding;
}

static void cs_etm__copy_insn(struct cs_etm_queue *etmq,
                              u64 trace_chan_id,
                              const struct cs_etm_packet *packet,
                              struct perf_sample *sample)
{
        /*
         * It's pointless to read instructions for the CS_ETM_DISCONTINUITY
         * packet, so directly bail out with 'insn_len' = 0.
         */
        if (packet->sample_type == CS_ETM_DISCONTINUITY) {
                sample->insn_len = 0;
                return;
        }

        /*
         * T32 instruction size might be 32-bit or 16-bit, decide by calling
         * cs_etm__t32_instr_size().
         */
        if (packet->isa == CS_ETM_ISA_T32)
                sample->insn_len = cs_etm__t32_instr_size(etmq, trace_chan_id,
                                                          sample->ip);
        /* Otherwise, A64 and A32 instruction sizes are always 32-bit. */
        else
                sample->insn_len = 4;

        cs_etm__mem_access(etmq, trace_chan_id, sample->ip,
                           sample->insn_len, (void *)sample->insn);
}

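/*
 * Synthesize one PERF_RECORD_SAMPLE for the instructions event at @addr,
 * attaching the last-branch stack when requested and delivering the
 * event through the session's ordered-event machinery.
 */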
static int cs_etm__synth_instruction_sample(struct cs_etm_queue *etmq,
                                            struct cs_etm_traceid_queue *tidq,
                                            u64 addr, u64 period)
{
        int ret = 0;
        struct cs_etm_auxtrace *etm = etmq->etm;
        union perf_event *event = tidq->event_buf;
        struct perf_sample sample = {.ip = 0,};

        event->sample.header.type = PERF_RECORD_SAMPLE;
        event->sample.header.misc = cs_etm__cpu_mode(etmq, addr);
        event->sample.header.size = sizeof(struct perf_event_header);

        sample.ip = addr;
        sample.pid = tidq->pid;
        sample.tid = tidq->tid;
        sample.id = etmq->etm->instructions_id;
        sample.stream_id = etmq->etm->instructions_id;
        sample.period = period;
        sample.cpu = tidq->packet->cpu;
        sample.flags = tidq->prev_packet->flags;
        sample.cpumode = event->sample.header.misc;

        cs_etm__copy_insn(etmq, tidq->trace_chan_id, tidq->packet, &sample);

        if (etm->synth_opts.last_branch)
                sample.branch_stack = tidq->last_branch;

        if (etm->synth_opts.inject) {
                ret = cs_etm__inject_event(event, &sample,
                                           etm->instructions_sample_type);
                if (ret)
                        return ret;
        }

        ret = perf_session__deliver_synth_event(etm->session, event, &sample);

        if (ret)
                pr_err(
                        "CS ETM Trace: failed to deliver instruction event, error %d\n",
                        ret);

        return ret;
}

/*
 * The cs etm packet encodes an instruction range between a branch target
 * and the next taken branch.  Generate a sample accordingly.
 */
static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq,
                                       struct cs_etm_traceid_queue *tidq)
{
        int ret = 0;
        struct cs_etm_auxtrace *etm = etmq->etm;
        struct perf_sample sample = {.ip = 0,};
        union perf_event *event = tidq->event_buf;
        struct dummy_branch_stack {
                u64                     nr;
                u64                     hw_idx;
                struct branch_entry     entries;
        } dummy_bs;
        u64 ip;

        ip = cs_etm__last_executed_instr(tidq->prev_packet);

        event->sample.header.type = PERF_RECORD_SAMPLE;
        event->sample.header.misc = cs_etm__cpu_mode(etmq, ip);
        event->sample.header.size = sizeof(struct perf_event_header);

        sample.ip = ip;
        sample.pid = tidq->pid;
        sample.tid = tidq->tid;
        sample.addr = cs_etm__first_executed_instr(tidq->packet);
        sample.id = etmq->etm->branches_id;
        sample.stream_id = etmq->etm->branches_id;
        sample.period = 1;
        sample.cpu = tidq->packet->cpu;
        sample.flags = tidq->prev_packet->flags;
        sample.cpumode = event->sample.header.misc;

        cs_etm__copy_insn(etmq, tidq->trace_chan_id, tidq->prev_packet,
                          &sample);

        /*
         * perf report cannot handle events without a branch stack
         */
        if (etm->synth_opts.last_branch) {
                dummy_bs = (struct dummy_branch_stack){
                        .nr = 1,
                        .hw_idx = -1ULL,
                        .entries = {
                                .from = sample.ip,
                                .to = sample.addr,
                        },
                };
                sample.branch_stack = (struct branch_stack *)&dummy_bs;
        }

        if (etm->synth_opts.inject) {
                ret = cs_etm__inject_event(event, &sample,
                                           etm->branches_sample_type);
                if (ret)
                        return ret;
        }

        ret = perf_session__deliver_synth_event(etm->session, event, &sample);

        if (ret)
                pr_err(
                "CS ETM Trace: failed to deliver branch event, error %d\n",
                ret);

        return ret;
}

struct cs_etm_synth {
        struct perf_tool dummy_tool;
        struct perf_session *session;
};

static int cs_etm__event_synth(struct perf_tool *tool,
                               union perf_event *event,
                               struct perf_sample *sample __maybe_unused,
                               struct machine *machine __maybe_unused)
{
        struct cs_etm_synth *cs_etm_synth =
                      container_of(tool, struct cs_etm_synth, dummy_tool);

        return perf_session__deliver_synth_event(cs_etm_synth->session,
                                                 event, NULL);
}

static int cs_etm__synth_event(struct perf_session *session,
                               struct perf_event_attr *attr, u64 id)
{
        struct cs_etm_synth cs_etm_synth;

        memset(&cs_etm_synth, 0, sizeof(struct cs_etm_synth));
        cs_etm_synth.session = session;

        return perf_event__synthesize_attr(&cs_etm_synth.dummy_tool, attr, 1,
                                           &id, cs_etm__event_synth);
}

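/*
 * Build the synthetic event attributes: clone the sample types and
 * exclude bits of the recorded ETM event, then register one attr (with a
 * fresh id) for each sample kind requested on the command line -
 * branches, last-branch stacks and/or periodic instruction samples.
 */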
static int cs_etm__synth_events(struct cs_etm_auxtrace *etm,
                                struct perf_session *session)
{
        struct evlist *evlist = session->evlist;
        struct evsel *evsel;
        struct perf_event_attr attr;
        bool found = false;
        u64 id;
        int err;

        evlist__for_each_entry(evlist, evsel) {
                if (evsel->core.attr.type == etm->pmu_type) {
                        found = true;
                        break;
                }
        }

        if (!found) {
                pr_debug("No selected events with CoreSight Trace data\n");
                return 0;
        }

        memset(&attr, 0, sizeof(struct perf_event_attr));
        attr.size = sizeof(struct perf_event_attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.sample_type = evsel->core.attr.sample_type & PERF_SAMPLE_MASK;
        attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
                            PERF_SAMPLE_PERIOD;
        if (etm->timeless_decoding)
                attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
        else
                attr.sample_type |= PERF_SAMPLE_TIME;

        attr.exclude_user = evsel->core.attr.exclude_user;
        attr.exclude_kernel = evsel->core.attr.exclude_kernel;
        attr.exclude_hv = evsel->core.attr.exclude_hv;
        attr.exclude_host = evsel->core.attr.exclude_host;
        attr.exclude_guest = evsel->core.attr.exclude_guest;
        attr.sample_id_all = evsel->core.attr.sample_id_all;
        attr.read_format = evsel->core.attr.read_format;

        /* create new id val to be a fixed offset from evsel id */
        id = evsel->core.id[0] + 1000000000;

        if (!id)
                id = 1;

        if (etm->synth_opts.branches) {
                attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
                attr.sample_period = 1;
                attr.sample_type |= PERF_SAMPLE_ADDR;
                err = cs_etm__synth_event(session, &attr, id);
                if (err)
                        return err;
                etm->sample_branches = true;
                etm->branches_sample_type = attr.sample_type;
                etm->branches_id = id;
                id += 1;
                attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
        }

        if (etm->synth_opts.last_branch)
                attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;

        if (etm->synth_opts.instructions) {
                attr.config = PERF_COUNT_HW_INSTRUCTIONS;
                attr.sample_period = etm->synth_opts.period;
                etm->instructions_sample_period = attr.sample_period;
                err = cs_etm__synth_event(session, &attr, id);
                if (err)
                        return err;
                etm->sample_instructions = true;
                etm->instructions_sample_type = attr.sample_type;
                etm->instructions_id = id;
                id += 1;
        }

        return 0;
}

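/*
 * Per-packet sampling hook: account the packet's instructions, keep the
 * last-branch ring buffer up to date, emit periodic instruction samples
 * and branch samples as configured, then swap PACKET and PREV_PACKET.
 */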
static int cs_etm__sample(struct cs_etm_queue *etmq,
                          struct cs_etm_traceid_queue *tidq)
{
        struct cs_etm_auxtrace *etm = etmq->etm;
        int ret;
        u8 trace_chan_id = tidq->trace_chan_id;
        u64 instrs_prev;

        /* Get instructions remainder from previous packet */
        instrs_prev = tidq->period_instructions;

        tidq->period_instructions += tidq->packet->instr_count;

        /*
         * Record a branch when the last instruction in
         * PREV_PACKET is a branch.
         */
        if (etm->synth_opts.last_branch &&
            tidq->prev_packet->sample_type == CS_ETM_RANGE &&
            tidq->prev_packet->last_instr_taken_branch)
                cs_etm__update_last_branch_rb(etmq, tidq);

        if (etm->sample_instructions &&
            tidq->period_instructions >= etm->instructions_sample_period) {
                /*
                 * Emit instruction sample periodically
                 * TODO: allow period to be defined in cycles and clock time
                 */

                /*
                 * Below diagram demonstrates the instruction samples
                 * generation flows:
                 *
                 *    Instrs     Instrs       Instrs       Instrs
                 *   Sample(n)  Sample(n+1)  Sample(n+2)  Sample(n+3)
                 *    |            |            |            |
                 *    V            V            V            V
                 *   --------------------------------------------------
                 *            ^                                  ^
                 *            |                                  |
                 *         Period                             Period
                 *    instructions(Pi)                   instructions(Pi')
                 *
                 *            |                                  |
                 *            \----------------------------------/
                 *                             V
                 *                 tidq->packet->instr_count
                 *
                 * Instrs Sample(n...) are the synthesised samples occurring
                 * every etm->instructions_sample_period instructions - as
                 * defined on the perf command line.  Sample(n) is the last
                 * sample before the current etm packet, n+1 to n+3 samples
                 * are generated from the current etm packet.
                 *
                 * tidq->packet->instr_count represents the number of
                 * instructions in the current etm packet.
                 *
                 * Period instructions (Pi) contains the number of
                 * instructions executed after the sample point(n) from the
                 * previous etm packet.  This will always be less than
                 * etm->instructions_sample_period.
                 *
                 * When new samples are generated, sample(n+1) combines two
                 * groups of instructions, the tail of the old packet and the
                 * head of the incoming one; sample(n+2) and sample(n+3) then
                 * consume whole sample periods.  After sample(n+3), the
                 * remaining instructions belong to a later packet and are
                 * carried over in tidq->period_instructions for the next
                 * round of calculation.
                 */

                /*
                 * Get the initial offset into the current packet instructions;
                 * entry conditions ensure that instrs_prev is less than
                 * etm->instructions_sample_period.
                 */
                u64 offset = etm->instructions_sample_period - instrs_prev;
                u64 addr;

                /* Prepare last branches for instruction sample */
                if (etm->synth_opts.last_branch)
                        cs_etm__copy_last_branch_rb(etmq, tidq);

                while (tidq->period_instructions >=
                                etm->instructions_sample_period) {
                        /*
                         * Calculate the address of the sampled instruction (-1
                         * as sample is reported as though instruction has just
                         * been executed, but PC has not advanced to next
                         * instruction)
                         */
                        addr = cs_etm__instr_addr(etmq, trace_chan_id,
                                                  tidq->packet, offset - 1);
                        ret = cs_etm__synth_instruction_sample(
                                etmq, tidq, addr,
                                etm->instructions_sample_period);
                        if (ret)
                                return ret;

                        offset += etm->instructions_sample_period;
                        tidq->period_instructions -=
                                etm->instructions_sample_period;
                }
        }

        if (etm->sample_branches) {
                bool generate_sample = false;

                /* Generate sample for tracing on packet */
                if (tidq->prev_packet->sample_type == CS_ETM_DISCONTINUITY)
                        generate_sample = true;

                /* Generate sample for branch taken packet */
                if (tidq->prev_packet->sample_type == CS_ETM_RANGE &&
                    tidq->prev_packet->last_instr_taken_branch)
                        generate_sample = true;

                if (generate_sample) {
                        ret = cs_etm__synth_branch_sample(etmq, tidq);
                        if (ret)
                                return ret;
                }
        }

        cs_etm__packet_swap(etm, tidq);

        return 0;
}

1495static int cs_etm__exception(struct cs_etm_traceid_queue *tidq)
1496{
        /*
         * When an exception packet is inserted, force
         * 'prev_packet->last_instr_taken_branch' to true regardless of
         * whether the last instruction in the previous range packet was
         * a taken branch.  This ensures a branch sample is generated for
         * the instruction range executed before the exception is trapped
         * to the kernel or before the exception returns.
         *
         * The exception packet includes dummy address values, so don't
         * swap PACKET with PREV_PACKET.  This keeps PREV_PACKET usable
         * for generating instruction and branch samples.
         */
1508        if (tidq->prev_packet->sample_type == CS_ETM_RANGE)
1509                tidq->prev_packet->last_instr_taken_branch = true;
1510
1511        return 0;
1512}
1513
1514static int cs_etm__flush(struct cs_etm_queue *etmq,
1515                         struct cs_etm_traceid_queue *tidq)
1516{
1517        int err = 0;
1518        struct cs_etm_auxtrace *etm = etmq->etm;
1519
1520        /* Handle start tracing packet */
1521        if (tidq->prev_packet->sample_type == CS_ETM_EMPTY)
1522                goto swap_packet;
1523
1524        if (etmq->etm->synth_opts.last_branch &&
1525            tidq->prev_packet->sample_type == CS_ETM_RANGE) {
1526                u64 addr;
1527
1528                /* Prepare last branches for instruction sample */
1529                cs_etm__copy_last_branch_rb(etmq, tidq);
1530
1531                /*
1532                 * Generate a last branch event for the branches left in the
1533                 * circular buffer at the end of the trace.
1534                 *
1535                 * Use the address of the end of the last reported execution
1536                 * range
1537                 */
1538                addr = cs_etm__last_executed_instr(tidq->prev_packet);
1539
1540                err = cs_etm__synth_instruction_sample(
1541                        etmq, tidq, addr,
1542                        tidq->period_instructions);
1543                if (err)
1544                        return err;
1545
1546                tidq->period_instructions = 0;
1547
1548        }
1549
1550        if (etm->sample_branches &&
1551            tidq->prev_packet->sample_type == CS_ETM_RANGE) {
1552                err = cs_etm__synth_branch_sample(etmq, tidq);
1553                if (err)
1554                        return err;
1555        }
1556
1557swap_packet:
1558        cs_etm__packet_swap(etm, tidq);
1559
        /* Reset last branches after flushing the trace */
1561        if (etm->synth_opts.last_branch)
1562                cs_etm__reset_last_branch_rb(tidq);
1563
1564        return err;
1565}
1566
1567static int cs_etm__end_block(struct cs_etm_queue *etmq,
1568                             struct cs_etm_traceid_queue *tidq)
1569{
1570        int err;
1571
        /*
         * There are no new packets coming and 'etmq->packet' contains the
         * stale packet left over from the previous packet swap; skip
         * generating a branch sample to avoid using the stale packet.
         *
         * In this case, only flush the branch stack and generate a last
         * branch event for the branches left in the circular buffer at
         * the end of the trace.
         */
1581        if (etmq->etm->synth_opts.last_branch &&
1582            tidq->prev_packet->sample_type == CS_ETM_RANGE) {
1583                u64 addr;
1584
1585                /* Prepare last branches for instruction sample */
1586                cs_etm__copy_last_branch_rb(etmq, tidq);
1587
1588                /*
1589                 * Use the address of the end of the last reported execution
1590                 * range.
1591                 */
1592                addr = cs_etm__last_executed_instr(tidq->prev_packet);
1593
1594                err = cs_etm__synth_instruction_sample(
1595                        etmq, tidq, addr,
1596                        tidq->period_instructions);
1597                if (err)
1598                        return err;
1599
1600                tidq->period_instructions = 0;
1601        }
1602
1603        return 0;
1604}

/*
1606 * cs_etm__get_data_block: Fetch a block from the auxtrace_buffer queue
1607 *                         if need be.
1608 * Returns:     < 0     if error
1609 *              = 0     if no more auxtrace_buffer to read
1610 *              > 0     if the current buffer isn't empty yet
1611 */
1612static int cs_etm__get_data_block(struct cs_etm_queue *etmq)
1613{
1614        int ret;
1615
1616        if (!etmq->buf_len) {
1617                ret = cs_etm__get_trace(etmq);
1618                if (ret <= 0)
1619                        return ret;
1620                /*
1621                 * We cannot assume consecutive blocks in the data file
1622                 * are contiguous, reset the decoder to force re-sync.
1623                 */
1624                ret = cs_etm_decoder__reset(etmq->decoder);
1625                if (ret)
1626                        return ret;
1627        }
1628
1629        return etmq->buf_len;
1630}
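
/*
 * Informal usage sketch, mirroring the actual callers below (see
 * cs_etm__run_decoder() and cs_etm__process_queues()): call
 * cs_etm__get_data_block() and bail out on a return value <= 0; a
 * positive return means etmq->buf_len bytes are ready to be handed to
 * cs_etm__decode_data_block().
 */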
1631
1632static bool cs_etm__is_svc_instr(struct cs_etm_queue *etmq, u8 trace_chan_id,
1633                                 struct cs_etm_packet *packet,
1634                                 u64 end_addr)
1635{
1636        /* Initialise to keep compiler happy */
1637        u16 instr16 = 0;
1638        u32 instr32 = 0;
1639        u64 addr;
1640
1641        switch (packet->isa) {
1642        case CS_ETM_ISA_T32:
1643                /*
1644                 * The SVC of T32 is defined in ARM DDI 0487D.a, F5.1.247:
1645                 *
1646                 *  b'15         b'8
1647                 * +-----------------+--------+
1648                 * | 1 1 0 1 1 1 1 1 |  imm8  |
1649                 * +-----------------+--------+
1650                 *
                 * According to the specification, SVC is only defined for
                 * T32 as a 16-bit instruction and has no 32-bit encoding;
                 * so below we read only 2 bytes as the T32 instruction
                 * size.
1654                 */
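                /*
                 * Worked example (illustrative values): "SVC #5" encodes
                 * as 0xDF05, and (0xDF05 & 0xFF00) == 0xDF00 satisfies
                 * the check below, while e.g. a T32 NOP (0xBF00) does
                 * not.
                 */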
1655                addr = end_addr - 2;
1656                cs_etm__mem_access(etmq, trace_chan_id, addr,
1657                                   sizeof(instr16), (u8 *)&instr16);
1658                if ((instr16 & 0xFF00) == 0xDF00)
1659                        return true;
1660
1661                break;
1662        case CS_ETM_ISA_A32:
1663                /*
1664                 * The SVC of A32 is defined in ARM DDI 0487D.a, F5.1.247:
1665                 *
1666                 *  b'31 b'28 b'27 b'24
1667                 * +---------+---------+-------------------------+
1668                 * |  !1111  | 1 1 1 1 |        imm24            |
1669                 * +---------+---------+-------------------------+
1670                 */
1671                addr = end_addr - 4;
1672                cs_etm__mem_access(etmq, trace_chan_id, addr,
1673                                   sizeof(instr32), (u8 *)&instr32);
1674                if ((instr32 & 0x0F000000) == 0x0F000000 &&
1675                    (instr32 & 0xF0000000) != 0xF0000000)
1676                        return true;
1677
1678                break;
1679        case CS_ETM_ISA_A64:
1680                /*
1681                 * The SVC of A64 is defined in ARM DDI 0487D.a, C6.2.294:
1682                 *
1683                 *  b'31               b'21           b'4     b'0
1684                 * +-----------------------+---------+-----------+
1685                 * | 1 1 0 1 0 1 0 0 0 0 0 |  imm16  | 0 0 0 0 1 |
1686                 * +-----------------------+---------+-----------+
1687                 */
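                /*
                 * Worked example (illustrative values): "SVC #0" encodes
                 * as 0xD4000001 and matches the check below, whereas
                 * "HVC #0" (0xD4000002) and "SMC #0" (0xD4000003) do
                 * not, since their low opcode bits differ.
                 */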
1688                addr = end_addr - 4;
1689                cs_etm__mem_access(etmq, trace_chan_id, addr,
1690                                   sizeof(instr32), (u8 *)&instr32);
1691                if ((instr32 & 0xFFE0001F) == 0xd4000001)
1692                        return true;
1693
1694                break;
1695        case CS_ETM_ISA_UNKNOWN:
1696        default:
1697                break;
1698        }
1699
1700        return false;
1701}
1702
1703static bool cs_etm__is_syscall(struct cs_etm_queue *etmq,
1704                               struct cs_etm_traceid_queue *tidq, u64 magic)
1705{
1706        u8 trace_chan_id = tidq->trace_chan_id;
1707        struct cs_etm_packet *packet = tidq->packet;
1708        struct cs_etm_packet *prev_packet = tidq->prev_packet;
1709
1710        if (magic == __perf_cs_etmv3_magic)
1711                if (packet->exception_number == CS_ETMV3_EXC_SVC)
1712                        return true;
1713
1714        /*
1715         * ETMv4 exception type CS_ETMV4_EXC_CALL covers SVC, SMC and
1716         * HVC cases; need to check if it's SVC instruction based on
1717         * packet address.
1718         */
1719        if (magic == __perf_cs_etmv4_magic) {
1720                if (packet->exception_number == CS_ETMV4_EXC_CALL &&
1721                    cs_etm__is_svc_instr(etmq, trace_chan_id, prev_packet,
1722                                         prev_packet->end_addr))
1723                        return true;
1724        }
1725
1726        return false;
1727}
1728
1729static bool cs_etm__is_async_exception(struct cs_etm_traceid_queue *tidq,
1730                                       u64 magic)
1731{
1732        struct cs_etm_packet *packet = tidq->packet;
1733
1734        if (magic == __perf_cs_etmv3_magic)
1735                if (packet->exception_number == CS_ETMV3_EXC_DEBUG_HALT ||
1736                    packet->exception_number == CS_ETMV3_EXC_ASYNC_DATA_ABORT ||
1737                    packet->exception_number == CS_ETMV3_EXC_PE_RESET ||
1738                    packet->exception_number == CS_ETMV3_EXC_IRQ ||
1739                    packet->exception_number == CS_ETMV3_EXC_FIQ)
1740                        return true;
1741
1742        if (magic == __perf_cs_etmv4_magic)
1743                if (packet->exception_number == CS_ETMV4_EXC_RESET ||
1744                    packet->exception_number == CS_ETMV4_EXC_DEBUG_HALT ||
1745                    packet->exception_number == CS_ETMV4_EXC_SYSTEM_ERROR ||
1746                    packet->exception_number == CS_ETMV4_EXC_INST_DEBUG ||
1747                    packet->exception_number == CS_ETMV4_EXC_DATA_DEBUG ||
1748                    packet->exception_number == CS_ETMV4_EXC_IRQ ||
1749                    packet->exception_number == CS_ETMV4_EXC_FIQ)
1750                        return true;
1751
1752        return false;
1753}
1754
1755static bool cs_etm__is_sync_exception(struct cs_etm_queue *etmq,
1756                                      struct cs_etm_traceid_queue *tidq,
1757                                      u64 magic)
1758{
1759        u8 trace_chan_id = tidq->trace_chan_id;
1760        struct cs_etm_packet *packet = tidq->packet;
1761        struct cs_etm_packet *prev_packet = tidq->prev_packet;
1762
1763        if (magic == __perf_cs_etmv3_magic)
1764                if (packet->exception_number == CS_ETMV3_EXC_SMC ||
1765                    packet->exception_number == CS_ETMV3_EXC_HYP ||
1766                    packet->exception_number == CS_ETMV3_EXC_JAZELLE_THUMBEE ||
1767                    packet->exception_number == CS_ETMV3_EXC_UNDEFINED_INSTR ||
1768                    packet->exception_number == CS_ETMV3_EXC_PREFETCH_ABORT ||
1769                    packet->exception_number == CS_ETMV3_EXC_DATA_FAULT ||
1770                    packet->exception_number == CS_ETMV3_EXC_GENERIC)
1771                        return true;
1772
1773        if (magic == __perf_cs_etmv4_magic) {
1774                if (packet->exception_number == CS_ETMV4_EXC_TRAP ||
1775                    packet->exception_number == CS_ETMV4_EXC_ALIGNMENT ||
1776                    packet->exception_number == CS_ETMV4_EXC_INST_FAULT ||
1777                    packet->exception_number == CS_ETMV4_EXC_DATA_FAULT)
1778                        return true;
1779
1780                /*
                 * For CS_ETMV4_EXC_CALL, instructions other than SVC
                 * (i.e. SMC and HVC) are taken as sync exceptions.
1783                 */
1784                if (packet->exception_number == CS_ETMV4_EXC_CALL &&
1785                    !cs_etm__is_svc_instr(etmq, trace_chan_id, prev_packet,
1786                                          prev_packet->end_addr))
1787                        return true;
1788
1789                /*
                 * ETMv4 has 5 bits for the exception number; if the number
                 * is in the range (CS_ETMV4_EXC_FIQ, CS_ETMV4_EXC_END],
                 * it is an implementation defined exception.
                 *
                 * In this case, simply treat it as a sync exception.
1795                 */
1796                if (packet->exception_number > CS_ETMV4_EXC_FIQ &&
1797                    packet->exception_number <= CS_ETMV4_EXC_END)
1798                        return true;
1799        }
1800
1801        return false;
1802}
1803
1804static int cs_etm__set_sample_flags(struct cs_etm_queue *etmq,
1805                                    struct cs_etm_traceid_queue *tidq)
1806{
1807        struct cs_etm_packet *packet = tidq->packet;
1808        struct cs_etm_packet *prev_packet = tidq->prev_packet;
1809        u8 trace_chan_id = tidq->trace_chan_id;
1810        u64 magic;
1811        int ret;
1812
1813        switch (packet->sample_type) {
1814        case CS_ETM_RANGE:
1815                /*
                 * An immediate branch instruction with neither link nor
                 * return flag is a normal branch within the current
                 * function.
1819                 */
1820                if (packet->last_instr_type == OCSD_INSTR_BR &&
1821                    packet->last_instr_subtype == OCSD_S_INSTR_NONE) {
1822                        packet->flags = PERF_IP_FLAG_BRANCH;
1823
1824                        if (packet->last_instr_cond)
1825                                packet->flags |= PERF_IP_FLAG_CONDITIONAL;
1826                }
1827
1828                /*
                 * An immediate branch instruction with link (e.g. BL) is
                 * a branch instruction used for a function call.
1831                 */
1832                if (packet->last_instr_type == OCSD_INSTR_BR &&
1833                    packet->last_instr_subtype == OCSD_S_INSTR_BR_LINK)
1834                        packet->flags = PERF_IP_FLAG_BRANCH |
1835                                        PERF_IP_FLAG_CALL;
1836
1837                /*
                 * An indirect branch instruction with link (e.g. BLR) is
                 * a branch instruction used for a function call.
1840                 */
1841                if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
1842                    packet->last_instr_subtype == OCSD_S_INSTR_BR_LINK)
1843                        packet->flags = PERF_IP_FLAG_BRANCH |
1844                                        PERF_IP_FLAG_CALL;
1845
1846                /*
                 * An indirect branch instruction with subtype
                 * OCSD_S_INSTR_V7_IMPLIED_RET is an explicit hint of a
                 * function return for A32/T32.
1850                 */
1851                if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
1852                    packet->last_instr_subtype == OCSD_S_INSTR_V7_IMPLIED_RET)
1853                        packet->flags = PERF_IP_FLAG_BRANCH |
1854                                        PERF_IP_FLAG_RETURN;
1855
1856                /*
                 * An indirect branch instruction without link (e.g. BR) is
                 * usually used for a function return, especially for
                 * functions in dynamically linked libraries.
1860                 */
1861                if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
1862                    packet->last_instr_subtype == OCSD_S_INSTR_NONE)
1863                        packet->flags = PERF_IP_FLAG_BRANCH |
1864                                        PERF_IP_FLAG_RETURN;
1865
1866                /* Return instruction for function return. */
1867                if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
1868                    packet->last_instr_subtype == OCSD_S_INSTR_V8_RET)
1869                        packet->flags = PERF_IP_FLAG_BRANCH |
1870                                        PERF_IP_FLAG_RETURN;
1871
1872                /*
1873                 * Decoder might insert a discontinuity in the middle of
1874                 * instruction packets, fixup prev_packet with flag
1875                 * PERF_IP_FLAG_TRACE_BEGIN to indicate restarting trace.
1876                 */
1877                if (prev_packet->sample_type == CS_ETM_DISCONTINUITY)
1878                        prev_packet->flags |= PERF_IP_FLAG_BRANCH |
1879                                              PERF_IP_FLAG_TRACE_BEGIN;
1880
1881                /*
                 * If the previous packet is an exception return packet
                 * and the return address immediately follows an SVC
                 * instruction, calibrate the previous packet's sample
                 * flags to PERF_IP_FLAG_SYSCALLRET.
1886                 */
1887                if (prev_packet->flags == (PERF_IP_FLAG_BRANCH |
1888                                           PERF_IP_FLAG_RETURN |
1889                                           PERF_IP_FLAG_INTERRUPT) &&
1890                    cs_etm__is_svc_instr(etmq, trace_chan_id,
1891                                         packet, packet->start_addr))
1892                        prev_packet->flags = PERF_IP_FLAG_BRANCH |
1893                                             PERF_IP_FLAG_RETURN |
1894                                             PERF_IP_FLAG_SYSCALLRET;
1895                break;
1896        case CS_ETM_DISCONTINUITY:
1897                /*
                 * The trace is discontinuous; if the previous packet is
                 * an instruction range packet, set the flag
                 * PERF_IP_FLAG_TRACE_END on it.
1901                 */
1902                if (prev_packet->sample_type == CS_ETM_RANGE)
1903                        prev_packet->flags |= PERF_IP_FLAG_BRANCH |
1904                                              PERF_IP_FLAG_TRACE_END;
1905                break;
1906        case CS_ETM_EXCEPTION:
1907                ret = cs_etm__get_magic(packet->trace_chan_id, &magic);
1908                if (ret)
1909                        return ret;
1910
                /* The exception is for a system call. */
1912                if (cs_etm__is_syscall(etmq, tidq, magic))
1913                        packet->flags = PERF_IP_FLAG_BRANCH |
1914                                        PERF_IP_FLAG_CALL |
1915                                        PERF_IP_FLAG_SYSCALLRET;
1916                /*
                 * These exceptions are triggered by external signals from
                 * the bus, interrupt controller, debug module, PE reset or
                 * halt.
1919                 */
1920                else if (cs_etm__is_async_exception(tidq, magic))
1921                        packet->flags = PERF_IP_FLAG_BRANCH |
1922                                        PERF_IP_FLAG_CALL |
1923                                        PERF_IP_FLAG_ASYNC |
1924                                        PERF_IP_FLAG_INTERRUPT;
1925                /*
                 * Otherwise, the exception is caused by a trap, an
                 * instruction or data fault, or an alignment error.
1928                 */
1929                else if (cs_etm__is_sync_exception(etmq, tidq, magic))
1930                        packet->flags = PERF_IP_FLAG_BRANCH |
1931                                        PERF_IP_FLAG_CALL |
1932                                        PERF_IP_FLAG_INTERRUPT;
1933
1934                /*
                 * An exception packet is not used standalone for
                 * generating samples; it is affiliated with the previous
                 * instruction range packet.  So copy the flags to the
                 * previous range packet to tell perf it is an exception
                 * taken branch.
1940                 */
1941                if (prev_packet->sample_type == CS_ETM_RANGE)
1942                        prev_packet->flags = packet->flags;
1943                break;
1944        case CS_ETM_EXCEPTION_RET:
1945                /*
                 * An exception return packet is not used standalone for
                 * generating samples; it is affiliated with the previous
                 * instruction range packet.  So set the previous range
                 * packet's flags to tell perf it is an exception return
                 * branch.
                 *
                 * The exception return can be for either a system call or
                 * another exception type; unfortunately the packet doesn't
                 * contain exception type information, so we cannot decide
                 * the exception type purely based on the exception return
                 * packet.  Recording the exception number from the
                 * exception packet and reusing it for the exception return
                 * packet is not reliable either, because the trace can be
                 * discontinuous or interrupts can be nested, in which case
                 * the recorded exception number would be wrong.
                 *
                 * For an exception return packet, we only need to
                 * distinguish whether it is for a system call or for
                 * another type.  The decision can therefore be deferred to
                 * the next packet, which contains the return address: from
                 * the return address we can read out the previous
                 * instruction, check whether it is a system call
                 * instruction, and calibrate the sample flags as needed.
1969                 */
1970                if (prev_packet->sample_type == CS_ETM_RANGE)
1971                        prev_packet->flags = PERF_IP_FLAG_BRANCH |
1972                                             PERF_IP_FLAG_RETURN |
1973                                             PERF_IP_FLAG_INTERRUPT;
1974                break;
1975        case CS_ETM_EMPTY:
1976        default:
1977                break;
1978        }
1979
1980        return 0;
1981}
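
/*
 * Informal recap of cs_etm__set_sample_flags() above, derived from the
 * code and kept here for orientation only (all flags are PERF_IP_FLAG_*):
 *
 *   B imm (no link/return)       BRANCH [| CONDITIONAL]
 *   BL / BLR                     BRANCH | CALL
 *   BR / RET / implied return    BRANCH | RETURN
 *   SVC exception                BRANCH | CALL | SYSCALLRET
 *   async exception (IRQ, ...)   BRANCH | CALL | ASYNC | INTERRUPT
 *   sync exception (fault, ...)  BRANCH | CALL | INTERRUPT
 */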
1982
1983static int cs_etm__decode_data_block(struct cs_etm_queue *etmq)
1984{
1985        int ret = 0;
1986        size_t processed = 0;
1987
1988        /*
1989         * Packets are decoded and added to the decoder's packet queue
1990         * until the decoder packet processing callback has requested that
1991         * processing stops or there is nothing left in the buffer.  Normal
1992         * operations that stop processing are a timestamp packet or a full
1993         * decoder buffer queue.
1994         */
1995        ret = cs_etm_decoder__process_data_block(etmq->decoder,
1996                                                 etmq->offset,
1997                                                 &etmq->buf[etmq->buf_used],
1998                                                 etmq->buf_len,
1999                                                 &processed);
2000        if (ret)
2001                goto out;
2002
2003        etmq->offset += processed;
2004        etmq->buf_used += processed;
2005        etmq->buf_len -= processed;
2006
2007out:
2008        return ret;
2009}
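
/*
 * Illustrative example with assumed sizes: if a 4096-byte buffer was
 * fetched and the decoder stops after consuming 1024 bytes (e.g. on a
 * timestamp packet), offset and buf_used advance by 1024 while buf_len
 * drops to 3072; callers keep invoking cs_etm__decode_data_block()
 * until buf_len reaches zero.
 */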
2010
2011static int cs_etm__process_traceid_queue(struct cs_etm_queue *etmq,
2012                                         struct cs_etm_traceid_queue *tidq)
2013{
2014        int ret;
2015        struct cs_etm_packet_queue *packet_queue;
2016
2017        packet_queue = &tidq->packet_queue;
2018
2019        /* Process each packet in this chunk */
2020        while (1) {
2021                ret = cs_etm_decoder__get_packet(packet_queue,
2022                                                 tidq->packet);
2023                if (ret <= 0)
2024                        /*
2025                         * Stop processing this chunk on
2026                         * end of data or error
2027                         */
2028                        break;
2029
2030                /*
                 * Packet addresses are swapped by the packet
                 * handling in the switch() statement below,
                 * so the sample flags must be set before the
                 * switch() statement, while the address
                 * information is still valid.
2036                 */
2037                ret = cs_etm__set_sample_flags(etmq, tidq);
2038                if (ret < 0)
2039                        break;
2040
2041                switch (tidq->packet->sample_type) {
2042                case CS_ETM_RANGE:
2043                        /*
2044                         * If the packet contains an instruction
2045                         * range, generate instruction sequence
2046                         * events.
2047                         */
2048                        cs_etm__sample(etmq, tidq);
2049                        break;
2050                case CS_ETM_EXCEPTION:
2051                case CS_ETM_EXCEPTION_RET:
2052                        /*
                         * On an exception packet, make sure
                         * the previous instruction range
                         * packet is handled properly.
2056                         */
2057                        cs_etm__exception(tidq);
2058                        break;
2059                case CS_ETM_DISCONTINUITY:
2060                        /*
2061                         * Discontinuity in trace, flush
2062                         * previous branch stack
2063                         */
2064                        cs_etm__flush(etmq, tidq);
2065                        break;
2066                case CS_ETM_EMPTY:
2067                        /*
                         * We should not receive an empty
                         * packet; report an error.
2070                         */
2071                        pr_err("CS ETM Trace: empty packet\n");
2072                        return -EINVAL;
2073                default:
2074                        break;
2075                }
2076        }
2077
2078        return ret;
2079}
2080
2081static void cs_etm__clear_all_traceid_queues(struct cs_etm_queue *etmq)
2082{
2083        int idx;
2084        struct int_node *inode;
2085        struct cs_etm_traceid_queue *tidq;
2086        struct intlist *traceid_queues_list = etmq->traceid_queues_list;
2087
2088        intlist__for_each_entry(inode, traceid_queues_list) {
2089                idx = (int)(intptr_t)inode->priv;
2090                tidq = etmq->traceid_queues[idx];
2091
2092                /* Ignore return value */
2093                cs_etm__process_traceid_queue(etmq, tidq);
2094
2095                /*
2096                 * Generate an instruction sample with the remaining
2097                 * branchstack entries.
2098                 */
2099                cs_etm__flush(etmq, tidq);
2100        }
2101}
2102
2103static int cs_etm__run_decoder(struct cs_etm_queue *etmq)
2104{
2105        int err = 0;
2106        struct cs_etm_traceid_queue *tidq;
2107
2108        tidq = cs_etm__etmq_get_traceid_queue(etmq, CS_ETM_PER_THREAD_TRACEID);
2109        if (!tidq)
2110                return -EINVAL;
2111
2112        /* Go through each buffer in the queue and decode them one by one */
2113        while (1) {
2114                err = cs_etm__get_data_block(etmq);
2115                if (err <= 0)
2116                        return err;
2117
2118                /* Run trace decoder until buffer consumed or end of trace */
2119                do {
2120                        err = cs_etm__decode_data_block(etmq);
2121                        if (err)
2122                                return err;
2123
2124                        /*
                         * Process each packet in this chunk; if an error
                         * occurs, there is nothing to do other than hope
                         * the next chunk will be better.
2128                         */
2129                        err = cs_etm__process_traceid_queue(etmq, tidq);
2130
2131                } while (etmq->buf_len);
2132
2133                if (err == 0)
2134                        /* Flush any remaining branch stack entries */
2135                        err = cs_etm__end_block(etmq, tidq);
2136        }
2137
2138        return err;
2139}
2140
2141static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
2142                                           pid_t tid)
2143{
2144        unsigned int i;
2145        struct auxtrace_queues *queues = &etm->queues;
2146
2147        for (i = 0; i < queues->nr_queues; i++) {
2148                struct auxtrace_queue *queue = &etm->queues.queue_array[i];
2149                struct cs_etm_queue *etmq = queue->priv;
2150                struct cs_etm_traceid_queue *tidq;
2151
2152                if (!etmq)
2153                        continue;
2154
2155                tidq = cs_etm__etmq_get_traceid_queue(etmq,
2156                                                CS_ETM_PER_THREAD_TRACEID);
2157
2158                if (!tidq)
2159                        continue;
2160
2161                if ((tid == -1) || (tidq->tid == tid)) {
2162                        cs_etm__set_pid_tid_cpu(etm, tidq);
2163                        cs_etm__run_decoder(etmq);
2164                }
2165        }
2166
2167        return 0;
2168}
2169
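/*
 * Informal sketch of the algorithm below, derived from the code: the
 * auxtrace min heap orders (queue, traceID) pairs by the timestamp of
 * their next decoded packets.  Each iteration pops the oldest entry,
 * synthesizes samples for the packets already decoded on that traceID,
 * decodes the next data block and re-inserts the entry with its new
 * timestamp, so that samples from all queues are emitted in global
 * timestamp order.
 */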
2170static int cs_etm__process_queues(struct cs_etm_auxtrace *etm)
2171{
2172        int ret = 0;
2173        unsigned int cs_queue_nr, queue_nr;
2174        u8 trace_chan_id;
2175        u64 timestamp;
2176        struct auxtrace_queue *queue;
2177        struct cs_etm_queue *etmq;
2178        struct cs_etm_traceid_queue *tidq;
2179
2180        while (1) {
2181                if (!etm->heap.heap_cnt)
2182                        goto out;
2183
2184                /* Take the entry at the top of the min heap */
2185                cs_queue_nr = etm->heap.heap_array[0].queue_nr;
2186                queue_nr = TO_QUEUE_NR(cs_queue_nr);
2187                trace_chan_id = TO_TRACE_CHAN_ID(cs_queue_nr);
2188                queue = &etm->queues.queue_array[queue_nr];
2189                etmq = queue->priv;
2190
2191                /*
2192                 * Remove the top entry from the heap since we are about
2193                 * to process it.
2194                 */
2195                auxtrace_heap__pop(&etm->heap);
2196
2197                tidq  = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
2198                if (!tidq) {
2199                        /*
2200                         * No traceID queue has been allocated for this traceID,
                         * which means something somewhere went very wrong.
                         * There is no choice other than to simply exit.
2203                         */
2204                        ret = -EINVAL;
2205                        goto out;
2206                }
2207
2208                /*
2209                 * Packets associated with this timestamp are already in
2210                 * the etmq's traceID queue, so process them.
2211                 */
2212                ret = cs_etm__process_traceid_queue(etmq, tidq);
2213                if (ret < 0)
2214                        goto out;
2215
2216                /*
2217                 * Packets for this timestamp have been processed, time to
2218                 * move on to the next timestamp, fetching a new auxtrace_buffer
2219                 * if need be.
2220                 */
2221refetch:
2222                ret = cs_etm__get_data_block(etmq);
2223                if (ret < 0)
2224                        goto out;
2225
2226                /*
2227                 * No more auxtrace_buffers to process in this etmq, simply
2228                 * move on to another entry in the auxtrace_heap.
2229                 */
2230                if (!ret)
2231                        continue;
2232
2233                ret = cs_etm__decode_data_block(etmq);
2234                if (ret)
2235                        goto out;
2236
2237                timestamp = cs_etm__etmq_get_timestamp(etmq, &trace_chan_id);
2238
2239                if (!timestamp) {
2240                        /*
                         * Function cs_etm__decode_data_block() returns when
                         * there are no more traces to decode in the current
                         * auxtrace_buffer OR when a timestamp has been
                         * encountered on any of the traceID queues.  Since we
                         * did not get a timestamp, there are no more traces to
                         * process in this auxtrace_buffer.  As such, empty and
                         * flush all traceID queues.
2248                         */
2249                        cs_etm__clear_all_traceid_queues(etmq);
2250
2251                        /* Fetch another auxtrace_buffer for this etmq */
2252                        goto refetch;
2253                }
2254
2255                /*
2256                 * Add to the min heap the timestamp for packets that have
2257                 * just been decoded.  They will be processed and synthesized
2258                 * during the next call to cs_etm__process_traceid_queue() for
2259                 * this queue/traceID.
2260                 */
2261                cs_queue_nr = TO_CS_QUEUE_NR(queue_nr, trace_chan_id);
2262                ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, timestamp);
2263        }
2264
2265out:
2266        return ret;
2267}
2268
2269static int cs_etm__process_itrace_start(struct cs_etm_auxtrace *etm,
2270                                        union perf_event *event)
2271{
2272        struct thread *th;
2273
2274        if (etm->timeless_decoding)
2275                return 0;
2276
2277        /*
2278         * Add the tid/pid to the log so that we can get a match when
2279         * we get a contextID from the decoder.
2280         */
2281        th = machine__findnew_thread(etm->machine,
2282                                     event->itrace_start.pid,
2283                                     event->itrace_start.tid);
2284        if (!th)
2285                return -ENOMEM;
2286
2287        thread__put(th);
2288
2289        return 0;
2290}
2291
2292static int cs_etm__process_switch_cpu_wide(struct cs_etm_auxtrace *etm,
2293                                           union perf_event *event)
2294{
2295        struct thread *th;
2296        bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
2297
2298        /*
         * Context switches in per-thread mode are irrelevant since perf
2300         * will start/stop tracing as the process is scheduled.
2301         */
2302        if (etm->timeless_decoding)
2303                return 0;
2304
2305        /*
2306         * SWITCH_IN events carry the next process to be switched out while
2307         * SWITCH_OUT events carry the process to be switched in.  As such
2308         * we don't care about IN events.
2309         */
2310        if (!out)
2311                return 0;
2312
2313        /*
2314         * Add the tid/pid to the log so that we can get a match when
2315         * we get a contextID from the decoder.
2316         */
2317        th = machine__findnew_thread(etm->machine,
2318                                     event->context_switch.next_prev_pid,
2319                                     event->context_switch.next_prev_tid);
2320        if (!th)
2321                return -ENOMEM;
2322
2323        thread__put(th);
2324
2325        return 0;
2326}
2327
2328static int cs_etm__process_event(struct perf_session *session,
2329                                 union perf_event *event,
2330                                 struct perf_sample *sample,
2331                                 struct perf_tool *tool)
2332{
2333        int err = 0;
2334        u64 timestamp;
2335        struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
2336                                                   struct cs_etm_auxtrace,
2337                                                   auxtrace);
2338
2339        if (dump_trace)
2340                return 0;
2341
2342        if (!tool->ordered_events) {
2343                pr_err("CoreSight ETM Trace requires ordered events\n");
2344                return -EINVAL;
2345        }
2346
2347        if (sample->time && (sample->time != (u64) -1))
2348                timestamp = sample->time;
2349        else
2350                timestamp = 0;
2351
2352        if (timestamp || etm->timeless_decoding) {
2353                err = cs_etm__update_queues(etm);
2354                if (err)
2355                        return err;
2356        }
2357
2358        if (etm->timeless_decoding &&
2359            event->header.type == PERF_RECORD_EXIT)
2360                return cs_etm__process_timeless_queues(etm,
2361                                                       event->fork.tid);
2362
2363        if (event->header.type == PERF_RECORD_ITRACE_START)
2364                return cs_etm__process_itrace_start(etm, event);
2365        else if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
2366                return cs_etm__process_switch_cpu_wide(etm, event);
2367
2368        if (!etm->timeless_decoding &&
2369            event->header.type == PERF_RECORD_AUX)
2370                return cs_etm__process_queues(etm);
2371
2372        return 0;
2373}
2374
2375static int cs_etm__process_auxtrace_event(struct perf_session *session,
2376                                          union perf_event *event,
2377                                          struct perf_tool *tool __maybe_unused)
2378{
2379        struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
2380                                                   struct cs_etm_auxtrace,
2381                                                   auxtrace);
2382        if (!etm->data_queued) {
2383                struct auxtrace_buffer *buffer;
2384                off_t  data_offset;
2385                int fd = perf_data__fd(session->data);
2386                bool is_pipe = perf_data__is_pipe(session->data);
2387                int err;
2388
2389                if (is_pipe)
2390                        data_offset = 0;
2391                else {
2392                        data_offset = lseek(fd, 0, SEEK_CUR);
2393                        if (data_offset == -1)
2394                                return -errno;
2395                }
2396
2397                err = auxtrace_queues__add_event(&etm->queues, session,
2398                                                 event, data_offset, &buffer);
2399                if (err)
2400                        return err;
2401
2402                if (dump_trace)
2403                        if (auxtrace_buffer__get_data(buffer, fd)) {
2404                                cs_etm__dump_event(etm, buffer);
2405                                auxtrace_buffer__put_data(buffer);
2406                        }
2407        }
2408
2409        return 0;
2410}
2411
2412static bool cs_etm__is_timeless_decoding(struct cs_etm_auxtrace *etm)
2413{
2414        struct evsel *evsel;
2415        struct evlist *evlist = etm->session->evlist;
2416        bool timeless_decoding = true;
2417
2418        /*
         * Cycle through the list of events; if any event has the time
         * bit set, timed decoding must be used.
2421         */
2422        evlist__for_each_entry(evlist, evsel) {
2423                if ((evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
2424                        timeless_decoding = false;
2425        }
2426
2427        return timeless_decoding;
2428}
2429
2430static const char * const cs_etm_global_header_fmts[] = {
2431        [CS_HEADER_VERSION_0]   = "     Header version                 %llx\n",
2432        [CS_PMU_TYPE_CPUS]      = "     PMU type/num cpus              %llx\n",
2433        [CS_ETM_SNAPSHOT]       = "     Snapshot                       %llx\n",
2434};
2435
2436static const char * const cs_etm_priv_fmts[] = {
2437        [CS_ETM_MAGIC]          = "     Magic number                   %llx\n",
2438        [CS_ETM_CPU]            = "     CPU                            %lld\n",
2439        [CS_ETM_ETMCR]          = "     ETMCR                          %llx\n",
2440        [CS_ETM_ETMTRACEIDR]    = "     ETMTRACEIDR                    %llx\n",
2441        [CS_ETM_ETMCCER]        = "     ETMCCER                        %llx\n",
2442        [CS_ETM_ETMIDR]         = "     ETMIDR                         %llx\n",
2443};
2444
2445static const char * const cs_etmv4_priv_fmts[] = {
2446        [CS_ETM_MAGIC]          = "     Magic number                   %llx\n",
2447        [CS_ETM_CPU]            = "     CPU                            %lld\n",
2448        [CS_ETMV4_TRCCONFIGR]   = "     TRCCONFIGR                     %llx\n",
2449        [CS_ETMV4_TRCTRACEIDR]  = "     TRCTRACEIDR                    %llx\n",
2450        [CS_ETMV4_TRCIDR0]      = "     TRCIDR0                        %llx\n",
2451        [CS_ETMV4_TRCIDR1]      = "     TRCIDR1                        %llx\n",
2452        [CS_ETMV4_TRCIDR2]      = "     TRCIDR2                        %llx\n",
2453        [CS_ETMV4_TRCIDR8]      = "     TRCIDR8                        %llx\n",
        [CS_ETMV4_TRCAUTHSTATUS] = "     TRCAUTHSTATUS                 %llx\n",
2455};
2456
2457static void cs_etm__print_auxtrace_info(__u64 *val, int num)
2458{
2459        int i, j, cpu = 0;
2460
2461        for (i = 0; i < CS_HEADER_VERSION_0_MAX; i++)
2462                fprintf(stdout, cs_etm_global_header_fmts[i], val[i]);
2463
2464        for (i = CS_HEADER_VERSION_0_MAX; cpu < num; cpu++) {
2465                if (val[i] == __perf_cs_etmv3_magic)
2466                        for (j = 0; j < CS_ETM_PRIV_MAX; j++, i++)
2467                                fprintf(stdout, cs_etm_priv_fmts[j], val[i]);
2468                else if (val[i] == __perf_cs_etmv4_magic)
2469                        for (j = 0; j < CS_ETMV4_PRIV_MAX; j++, i++)
2470                                fprintf(stdout, cs_etmv4_priv_fmts[j], val[i]);
2471                else
2472                        /* failure.. return */
2473                        return;
2474        }
2475}
2476
2477int cs_etm__process_auxtrace_info(union perf_event *event,
2478                                  struct perf_session *session)
2479{
2480        struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
2481        struct cs_etm_auxtrace *etm = NULL;
2482        struct int_node *inode;
2483        unsigned int pmu_type;
2484        int event_header_size = sizeof(struct perf_event_header);
2485        int info_header_size;
2486        int total_size = auxtrace_info->header.size;
2487        int priv_size = 0;
2488        int num_cpu;
2489        int err = 0, idx = -1;
2490        int i, j, k;
2491        u64 *ptr, *hdr = NULL;
2492        u64 **metadata = NULL;
2493
2494        /*
2495         * sizeof(auxtrace_info_event::type) +
2496         * sizeof(auxtrace_info_event::reserved) == 8
2497         */
2498        info_header_size = 8;
2499
2500        if (total_size < (event_header_size + info_header_size))
2501                return -EINVAL;
2502
2503        priv_size = total_size - event_header_size - info_header_size;
2504
2505        /* First the global part */
2506        ptr = (u64 *) auxtrace_info->priv;
2507
2508        /* Look for version '0' of the header */
2509        if (ptr[0] != 0)
2510                return -EINVAL;
2511
2512        hdr = zalloc(sizeof(*hdr) * CS_HEADER_VERSION_0_MAX);
2513        if (!hdr)
2514                return -ENOMEM;
2515
2516        /* Extract header information - see cs-etm.h for format */
2517        for (i = 0; i < CS_HEADER_VERSION_0_MAX; i++)
2518                hdr[i] = ptr[i];
2519        num_cpu = hdr[CS_PMU_TYPE_CPUS] & 0xffffffff;
2520        pmu_type = (unsigned int) ((hdr[CS_PMU_TYPE_CPUS] >> 32) &
2521                                    0xffffffff);
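
        /*
         * Illustrative example with assumed values:
         * hdr[CS_PMU_TYPE_CPUS] == 0x0000000800000004 decodes to
         * pmu_type = 8 and num_cpu = 4.
         */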
2522
2523        /*
         * Create an RB tree for traceID-metadata tuples.  Since the
         * conversion has to be made for each packet that gets decoded,
         * anything faster than sequential array access is worth doing.
2527         */
2528        traceid_list = intlist__new(NULL);
2529        if (!traceid_list) {
2530                err = -ENOMEM;
2531                goto err_free_hdr;
2532        }
2533
2534        metadata = zalloc(sizeof(*metadata) * num_cpu);
2535        if (!metadata) {
2536                err = -ENOMEM;
2537                goto err_free_traceid_list;
2538        }
2539
2540        /*
2541         * The metadata is stored in the auxtrace_info section and encodes
2542         * the configuration of the ARM embedded trace macrocell which is
2543         * required by the trace decoder to properly decode the trace due
2544         * to its highly compressed nature.
2545         */
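
        /*
         * Illustrative layout, assuming a session with two ETMv4 CPUs:
         * priv[] starts with CS_HEADER_VERSION_0_MAX global words,
         * followed by CS_ETMV4_PRIV_MAX words of metadata for each CPU.
         * The running index 'i' counts the u64 words consumed, which the
         * "i * 8 != priv_size" check further down uses to validate that
         * the advertised private size matches what was parsed.
         */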
2546        for (j = 0; j < num_cpu; j++) {
2547                if (ptr[i] == __perf_cs_etmv3_magic) {
2548                        metadata[j] = zalloc(sizeof(*metadata[j]) *
2549                                             CS_ETM_PRIV_MAX);
2550                        if (!metadata[j]) {
2551                                err = -ENOMEM;
2552                                goto err_free_metadata;
2553                        }
2554                        for (k = 0; k < CS_ETM_PRIV_MAX; k++)
2555                                metadata[j][k] = ptr[i + k];
2556
2557                        /* The traceID is our handle */
2558                        idx = metadata[j][CS_ETM_ETMTRACEIDR];
2559                        i += CS_ETM_PRIV_MAX;
2560                } else if (ptr[i] == __perf_cs_etmv4_magic) {
2561                        metadata[j] = zalloc(sizeof(*metadata[j]) *
2562                                             CS_ETMV4_PRIV_MAX);
2563                        if (!metadata[j]) {
2564                                err = -ENOMEM;
2565                                goto err_free_metadata;
2566                        }
2567                        for (k = 0; k < CS_ETMV4_PRIV_MAX; k++)
2568                                metadata[j][k] = ptr[i + k];
2569
2570                        /* The traceID is our handle */
2571                        idx = metadata[j][CS_ETMV4_TRCTRACEIDR];
2572                        i += CS_ETMV4_PRIV_MAX;
2573                }
2574
2575                /* Get an RB node for this CPU */
2576                inode = intlist__findnew(traceid_list, idx);
2577
2578                /* Something went wrong, no need to continue */
2579                if (!inode) {
2580                        err = -ENOMEM;
2581                        goto err_free_metadata;
2582                }
2583
2584                /*
                 * The node for that CPU should not already be taken;
                 * back out if it is.
2587                 */
2588                if (inode->priv) {
2589                        err = -EINVAL;
2590                        goto err_free_metadata;
2591                }
2592                /* All good, associate the traceID with the metadata pointer */
2593                inode->priv = metadata[j];
2594        }
2595
2596        /*
2597         * Each of CS_HEADER_VERSION_0_MAX, CS_ETM_PRIV_MAX and
2598         * CS_ETMV4_PRIV_MAX mark how many double words are in the
2599         * global metadata, and each cpu's metadata respectively.
2600         * The following tests if the correct number of double words was
2601         * present in the auxtrace info section.
2602         */
2603        if (i * 8 != priv_size) {
2604                err = -EINVAL;
2605                goto err_free_metadata;
2606        }
2607
2608        etm = zalloc(sizeof(*etm));
2609
2610        if (!etm) {
2611                err = -ENOMEM;
2612                goto err_free_metadata;
2613        }
2614
2615        err = auxtrace_queues__init(&etm->queues);
2616        if (err)
2617                goto err_free_etm;
2618
2619        etm->session = session;
2620        etm->machine = &session->machines.host;
2621
2622        etm->num_cpu = num_cpu;
2623        etm->pmu_type = pmu_type;
2624        etm->snapshot_mode = (hdr[CS_ETM_SNAPSHOT] != 0);
2625        etm->metadata = metadata;
2626        etm->auxtrace_type = auxtrace_info->type;
2627        etm->timeless_decoding = cs_etm__is_timeless_decoding(etm);
2628
2629        etm->auxtrace.process_event = cs_etm__process_event;
2630        etm->auxtrace.process_auxtrace_event = cs_etm__process_auxtrace_event;
2631        etm->auxtrace.flush_events = cs_etm__flush_events;
2632        etm->auxtrace.free_events = cs_etm__free_events;
2633        etm->auxtrace.free = cs_etm__free;
2634        etm->auxtrace.evsel_is_auxtrace = cs_etm__evsel_is_auxtrace;
2635        session->auxtrace = &etm->auxtrace;
2636
2637        etm->unknown_thread = thread__new(999999999, 999999999);
2638        if (!etm->unknown_thread) {
2639                err = -ENOMEM;
2640                goto err_free_queues;
2641        }
2642
2643        /*
         * Initialize the list node so that thread__zput() does not hit a
         * segmentation fault in list_del_init().
2646         */
2647        INIT_LIST_HEAD(&etm->unknown_thread->node);
2648
2649        err = thread__set_comm(etm->unknown_thread, "unknown", 0);
2650        if (err)
2651                goto err_delete_thread;
2652
2653        if (thread__init_maps(etm->unknown_thread, etm->machine)) {
2654                err = -ENOMEM;
2655                goto err_delete_thread;
2656        }
2657
2658        if (dump_trace) {
2659                cs_etm__print_auxtrace_info(auxtrace_info->priv, num_cpu);
2660                return 0;
2661        }
2662
2663        if (session->itrace_synth_opts->set) {
2664                etm->synth_opts = *session->itrace_synth_opts;
2665        } else {
2666                itrace_synth_opts__set_default(&etm->synth_opts,
2667                                session->itrace_synth_opts->default_no_sample);
2668                etm->synth_opts.callchain = false;
2669        }
2670
2671        err = cs_etm__synth_events(etm, session);
2672        if (err)
2673                goto err_delete_thread;
2674
2675        err = auxtrace_queues__process_index(&etm->queues, session);
2676        if (err)
2677                goto err_delete_thread;
2678
2679        etm->data_queued = etm->queues.populated;
2680
2681        return 0;
2682
2683err_delete_thread:
2684        thread__zput(etm->unknown_thread);
2685err_free_queues:
2686        auxtrace_queues__free(&etm->queues);
2687        session->auxtrace = NULL;
2688err_free_etm:
2689        zfree(&etm);
2690err_free_metadata:
2691        /* No need to check @metadata[j], free(NULL) is supported */
2692        for (j = 0; j < num_cpu; j++)
2693                zfree(&metadata[j]);
2694        zfree(&metadata);
2695err_free_traceid_list:
2696        intlist__delete(traceid_list);
2697err_free_hdr:
2698        zfree(&hdr);
2699
2700        return err;
2701}
2702