1
2
3
4
5
6
7
8
9#include <linux/bitops.h>
10#include <linux/coresight-pmu.h>
11#include <linux/err.h>
12#include <linux/kernel.h>
13#include <linux/log2.h>
14#include <linux/types.h>
15#include <linux/zalloc.h>
16
17#include <opencsd/ocsd_if_types.h>
18#include <stdlib.h>
19
20#include "auxtrace.h"
21#include "color.h"
22#include "cs-etm.h"
23#include "cs-etm-decoder/cs-etm-decoder.h"
24#include "debug.h"
25#include "dso.h"
26#include "evlist.h"
27#include "intlist.h"
28#include "machine.h"
29#include "map.h"
30#include "perf.h"
31#include "session.h"
32#include "map_symbol.h"
33#include "branch.h"
34#include "symbol.h"
35#include "tool.h"
36#include "thread.h"
37#include "thread-stack.h"
38#include <tools/libc_compat.h>
39#include "util/synthetic-events.h"
40
/*
 * Per-session CoreSight ETM decode state, embedded in the perf session
 * via the auxtrace member.
 */
struct cs_etm_auxtrace {
	struct auxtrace auxtrace;
	struct auxtrace_queues queues;
	struct auxtrace_heap heap;	/* orders queues by timestamp */
	struct itrace_synth_opts synth_opts;
	struct perf_session *session;
	struct machine *machine;
	struct thread *unknown_thread;	/* fallback when no thread resolves */

	u8 timeless_decoding;	/* trace carries no timestamps (per-thread) */
	u8 snapshot_mode;
	u8 data_queued;
	u8 sample_branches;	/* synthesize branch samples */
	u8 sample_instructions;	/* synthesize instruction samples */

	int num_cpu;
	u64 latest_kernel_timestamp;
	u32 auxtrace_type;
	u64 branches_sample_type;	/* sample_type of synthesized branches */
	u64 branches_id;		/* event id for synthesized branches */
	u64 instructions_sample_type;
	u64 instructions_sample_period;
	u64 instructions_id;
	u64 **metadata;		/* per-CPU metadata, indexed [cpu][field] */
	unsigned int pmu_type;
};
67
/*
 * Per-trace-ID decode state: one instance for each trace stream ID seen
 * on a given cs_etm_queue (one per source CPU when traces are formatted).
 */
struct cs_etm_traceid_queue {
	u8 trace_chan_id;	/* stream ID this state belongs to */
	pid_t pid, tid;
	u64 period_instructions;	/* instruction accumulator (assumed; updated by sampling code) */
	size_t last_branch_pos;	/* write position in last_branch_rb */
	union perf_event *event_buf;	/* scratch buffer for synthesized events */
	struct thread *thread;
	struct branch_stack *last_branch;	/* linearized copy handed to samples */
	struct branch_stack *last_branch_rb;	/* branch ring buffer */
	struct cs_etm_packet *prev_packet;	/* previously handled packet */
	struct cs_etm_packet *packet;		/* packet currently handled */
	struct cs_etm_packet_queue packet_queue;
};
81
/* Per-auxtrace-queue decode state. */
struct cs_etm_queue {
	struct cs_etm_auxtrace *etm;	/* back pointer to session state */
	struct cs_etm_decoder *decoder;
	struct auxtrace_buffer *buffer;	/* aux buffer being consumed */
	unsigned int queue_nr;
	u8 pending_timestamp_chan_id;	/* trace ID with a timestamp pending */
	u64 offset;
	const unsigned char *buf;	/* raw trace bytes of current buffer */
	size_t buf_len, buf_used;

	/* Maps trace stream IDs to indices into traceid_queues[] */
	struct intlist *traceid_queues_list;
	struct cs_etm_traceid_queue **traceid_queues;
};
95
96
97static struct intlist *traceid_list;
98
99static int cs_etm__process_queues(struct cs_etm_auxtrace *etm);
100static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
101 pid_t tid);
102static int cs_etm__get_data_block(struct cs_etm_queue *etmq);
103static int cs_etm__decode_data_block(struct cs_etm_queue *etmq);
104
105
106#define ETMIDR_PTM_VERSION 0x00000300
107
108
109
110
111
112
113
114#define TO_CS_QUEUE_NR(queue_nr, trace_chan_id) \
115 (queue_nr << 16 | trace_chan_id)
116#define TO_QUEUE_NR(cs_queue_nr) (cs_queue_nr >> 16)
117#define TO_TRACE_CHAN_ID(cs_queue_nr) (cs_queue_nr & 0x0000ffff)
118
119static u32 cs_etm__get_v7_protocol_version(u32 etmidr)
120{
121 etmidr &= ETMIDR_PTM_VERSION;
122
123 if (etmidr == ETMIDR_PTM_VERSION)
124 return CS_ETM_PROTO_PTM;
125
126 return CS_ETM_PROTO_ETMV3;
127}
128
129static int cs_etm__get_magic(u8 trace_chan_id, u64 *magic)
130{
131 struct int_node *inode;
132 u64 *metadata;
133
134 inode = intlist__find(traceid_list, trace_chan_id);
135 if (!inode)
136 return -EINVAL;
137
138 metadata = inode->priv;
139 *magic = metadata[CS_ETM_MAGIC];
140 return 0;
141}
142
143int cs_etm__get_cpu(u8 trace_chan_id, int *cpu)
144{
145 struct int_node *inode;
146 u64 *metadata;
147
148 inode = intlist__find(traceid_list, trace_chan_id);
149 if (!inode)
150 return -EINVAL;
151
152 metadata = inode->priv;
153 *cpu = (int)metadata[CS_ETM_CPU];
154 return 0;
155}
156
157
158
159
160
161
162
163
164
165
166
167
168
/*
 * Work out, from the recorded metadata, which context-ID register the PID
 * was traced from.  The answer is expressed as the perf config bit used at
 * record time: BIT(ETM_OPT_CTXTID) or BIT(ETM_OPT_CTXTID2).  *pid_fmt is
 * left untouched when no context-ID tracing was enabled.
 *
 * Returns 0 on success, -EINVAL when @trace_chan_id has no metadata.
 */
int cs_etm__get_pid_fmt(u8 trace_chan_id, u64 *pid_fmt)
{
	struct int_node *inode;
	u64 *metadata, val;

	inode = intlist__find(traceid_list, trace_chan_id);
	if (!inode)
		return -EINVAL;

	metadata = inode->priv;

	if (metadata[CS_ETM_MAGIC] == __perf_cs_etmv3_magic) {
		/* ETMv3/PTM: context-ID tracing is flagged in ETMCR */
		val = metadata[CS_ETM_ETMCR];

		if (val & BIT(ETM_OPT_CTXTID))
			*pid_fmt = BIT(ETM_OPT_CTXTID);
	} else {
		/* ETMv4/ETE: inspect TRCCONFIGR */
		val = metadata[CS_ETMV4_TRCCONFIGR];

		/* Any VMID tracing implies the PID came from CONTEXTIDR_EL2 */
		if (val & (BIT(ETM4_CFG_BIT_VMID) | BIT(ETM4_CFG_BIT_VMID_OPT)))
			*pid_fmt = BIT(ETM_OPT_CTXTID2);

		else if (val & BIT(ETM4_CFG_BIT_CTXTID))
			*pid_fmt = BIT(ETM_OPT_CTXTID);
	}

	return 0;
}
197
void cs_etm__etmq_set_traceid_queue_timestamp(struct cs_etm_queue *etmq,
					      u8 trace_chan_id)
{
	/*
	 * Record which trace stream just produced a timestamp; the value
	 * itself is fetched later by cs_etm__etmq_get_timestamp().  Only a
	 * single pending ID is kept — NOTE(review): assumes the decoder
	 * signals at most one pending timestamp at a time; confirm against
	 * the decoder callbacks.
	 */
	etmq->pending_timestamp_chan_id = trace_chan_id;
}
210
/*
 * Consume the pending timestamp for this queue, if any.  Returns the
 * timestamp (0 if none pending or the packet queue cannot be found) and
 * optionally reports the trace stream ID it belongs to.
 */
static u64 cs_etm__etmq_get_timestamp(struct cs_etm_queue *etmq,
				      u8 *trace_chan_id)
{
	struct cs_etm_packet_queue *packet_queue;

	/* No timestamp has been signalled by the decoder. */
	if (!etmq->pending_timestamp_chan_id)
		return 0;

	/* Tell the caller which stream the timestamp came from, if asked. */
	if (trace_chan_id)
		*trace_chan_id = etmq->pending_timestamp_chan_id;

	packet_queue = cs_etm__etmq_get_packet_queue(etmq,
						     etmq->pending_timestamp_chan_id);
	if (!packet_queue)
		return 0;

	/* Acknowledge pending status */
	etmq->pending_timestamp_chan_id = 0;

	/* cs_timestamp was stored by the decoder timestamp handling */
	return packet_queue->cs_timestamp;
}
233
234static void cs_etm__clear_packet_queue(struct cs_etm_packet_queue *queue)
235{
236 int i;
237
238 queue->head = 0;
239 queue->tail = 0;
240 queue->packet_count = 0;
241 for (i = 0; i < CS_ETM_PACKET_MAX_BUFFER; i++) {
242 queue->packet_buffer[i].isa = CS_ETM_ISA_UNKNOWN;
243 queue->packet_buffer[i].start_addr = CS_ETM_INVAL_ADDR;
244 queue->packet_buffer[i].end_addr = CS_ETM_INVAL_ADDR;
245 queue->packet_buffer[i].instr_count = 0;
246 queue->packet_buffer[i].last_instr_taken_branch = false;
247 queue->packet_buffer[i].last_instr_size = 0;
248 queue->packet_buffer[i].last_instr_type = 0;
249 queue->packet_buffer[i].last_instr_subtype = 0;
250 queue->packet_buffer[i].last_instr_cond = 0;
251 queue->packet_buffer[i].flags = 0;
252 queue->packet_buffer[i].exception_number = UINT32_MAX;
253 queue->packet_buffer[i].trace_chan_id = UINT8_MAX;
254 queue->packet_buffer[i].cpu = INT_MIN;
255 }
256}
257
258static void cs_etm__clear_all_packet_queues(struct cs_etm_queue *etmq)
259{
260 int idx;
261 struct int_node *inode;
262 struct cs_etm_traceid_queue *tidq;
263 struct intlist *traceid_queues_list = etmq->traceid_queues_list;
264
265 intlist__for_each_entry(inode, traceid_queues_list) {
266 idx = (int)(intptr_t)inode->priv;
267 tidq = etmq->traceid_queues[idx];
268 cs_etm__clear_packet_queue(&tidq->packet_queue);
269 }
270}
271
/*
 * Allocate the per-trace-ID resources held by @tidq: current/previous
 * packet buffers, the optional last-branch ring buffers and a scratch
 * event buffer.  Returns 0 on success, -ENOMEM on allocation failure
 * (all partial allocations are released).
 */
static int cs_etm__init_traceid_queue(struct cs_etm_queue *etmq,
				      struct cs_etm_traceid_queue *tidq,
				      u8 trace_chan_id)
{
	int rc = -ENOMEM;
	struct auxtrace_queue *queue;
	struct cs_etm_auxtrace *etm = etmq->etm;

	cs_etm__clear_packet_queue(&tidq->packet_queue);

	/* Inherit tid from the owning auxtrace queue; pid resolved later. */
	queue = &etmq->etm->queues.queue_array[etmq->queue_nr];
	tidq->tid = queue->tid;
	tidq->pid = -1;
	tidq->trace_chan_id = trace_chan_id;

	tidq->packet = zalloc(sizeof(struct cs_etm_packet));
	if (!tidq->packet)
		goto out;

	tidq->prev_packet = zalloc(sizeof(struct cs_etm_packet));
	if (!tidq->prev_packet)
		goto out_free;

	if (etm->synth_opts.last_branch) {
		/* Header plus one entry per requested branch record. */
		size_t sz = sizeof(struct branch_stack);

		sz += etm->synth_opts.last_branch_sz *
			sizeof(struct branch_entry);
		tidq->last_branch = zalloc(sz);
		if (!tidq->last_branch)
			goto out_free;
		tidq->last_branch_rb = zalloc(sz);
		if (!tidq->last_branch_rb)
			goto out_free;
	}

	tidq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
	if (!tidq->event_buf)
		goto out_free;

	return 0;

out_free:
	/* zfree() tolerates members that were never allocated. */
	zfree(&tidq->last_branch_rb);
	zfree(&tidq->last_branch);
	zfree(&tidq->prev_packet);
	zfree(&tidq->packet);
out:
	return rc;
}
322
323static struct cs_etm_traceid_queue
324*cs_etm__etmq_get_traceid_queue(struct cs_etm_queue *etmq, u8 trace_chan_id)
325{
326 int idx;
327 struct int_node *inode;
328 struct intlist *traceid_queues_list;
329 struct cs_etm_traceid_queue *tidq, **traceid_queues;
330 struct cs_etm_auxtrace *etm = etmq->etm;
331
332 if (etm->timeless_decoding)
333 trace_chan_id = CS_ETM_PER_THREAD_TRACEID;
334
335 traceid_queues_list = etmq->traceid_queues_list;
336
337
338
339
340
341 inode = intlist__find(traceid_queues_list, trace_chan_id);
342 if (inode) {
343 idx = (int)(intptr_t)inode->priv;
344 return etmq->traceid_queues[idx];
345 }
346
347
348 tidq = malloc(sizeof(*tidq));
349 if (!tidq)
350 return NULL;
351
352 memset(tidq, 0, sizeof(*tidq));
353
354
355 idx = intlist__nr_entries(traceid_queues_list);
356
357 inode = intlist__findnew(traceid_queues_list, trace_chan_id);
358 if (!inode)
359 goto out_free;
360
361
362 inode->priv = (void *)(intptr_t)idx;
363
364 if (cs_etm__init_traceid_queue(etmq, tidq, trace_chan_id))
365 goto out_free;
366
367
368 traceid_queues = etmq->traceid_queues;
369 traceid_queues = reallocarray(traceid_queues,
370 idx + 1,
371 sizeof(*traceid_queues));
372
373
374
375
376
377 if (!traceid_queues)
378 goto out_free;
379
380 traceid_queues[idx] = tidq;
381 etmq->traceid_queues = traceid_queues;
382
383 return etmq->traceid_queues[idx];
384
385out_free:
386
387
388
389
390 intlist__remove(traceid_queues_list, inode);
391 free(tidq);
392
393 return NULL;
394}
395
396struct cs_etm_packet_queue
397*cs_etm__etmq_get_packet_queue(struct cs_etm_queue *etmq, u8 trace_chan_id)
398{
399 struct cs_etm_traceid_queue *tidq;
400
401 tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
402 if (tidq)
403 return &tidq->packet_queue;
404
405 return NULL;
406}
407
408static void cs_etm__packet_swap(struct cs_etm_auxtrace *etm,
409 struct cs_etm_traceid_queue *tidq)
410{
411 struct cs_etm_packet *tmp;
412
413 if (etm->sample_branches || etm->synth_opts.last_branch ||
414 etm->sample_instructions) {
415
416
417
418
419 tmp = tidq->packet;
420 tidq->packet = tidq->prev_packet;
421 tidq->prev_packet = tmp;
422 }
423}
424
425static void cs_etm__packet_dump(const char *pkt_string)
426{
427 const char *color = PERF_COLOR_BLUE;
428 int len = strlen(pkt_string);
429
430 if (len && (pkt_string[len-1] == '\n'))
431 color_fprintf(stdout, color, " %s", pkt_string);
432 else
433 color_fprintf(stdout, color, " %s\n", pkt_string);
434
435 fflush(stdout);
436}
437
/*
 * Fill the decoder trace parameters for CPU @idx from the recorded
 * ETMv3/PTM metadata (control register and trace ID).
 */
static void cs_etm__set_trace_param_etmv3(struct cs_etm_trace_params *t_params,
					  struct cs_etm_auxtrace *etm, int idx,
					  u32 etmidr)
{
	u64 **metadata = etm->metadata;

	t_params[idx].protocol = cs_etm__get_v7_protocol_version(etmidr);
	t_params[idx].etmv3.reg_ctrl = metadata[idx][CS_ETM_ETMCR];
	t_params[idx].etmv3.reg_trc_id = metadata[idx][CS_ETM_ETMTRACEIDR];
}
448
/*
 * Fill the decoder trace parameters for CPU @idx from the recorded
 * ETMv4 metadata (ID, configuration and trace ID registers).
 */
static void cs_etm__set_trace_param_etmv4(struct cs_etm_trace_params *t_params,
					  struct cs_etm_auxtrace *etm, int idx)
{
	u64 **metadata = etm->metadata;

	t_params[idx].protocol = CS_ETM_PROTO_ETMV4i;
	t_params[idx].etmv4.reg_idr0 = metadata[idx][CS_ETMV4_TRCIDR0];
	t_params[idx].etmv4.reg_idr1 = metadata[idx][CS_ETMV4_TRCIDR1];
	t_params[idx].etmv4.reg_idr2 = metadata[idx][CS_ETMV4_TRCIDR2];
	t_params[idx].etmv4.reg_idr8 = metadata[idx][CS_ETMV4_TRCIDR8];
	t_params[idx].etmv4.reg_configr = metadata[idx][CS_ETMV4_TRCCONFIGR];
	t_params[idx].etmv4.reg_traceidr = metadata[idx][CS_ETMV4_TRCTRACEIDR];
}
462
/*
 * Fill the decoder trace parameters for CPU @idx from the recorded ETE
 * metadata.  ETE shares the ETMv4 register layout plus TRCDEVARCH.
 */
static void cs_etm__set_trace_param_ete(struct cs_etm_trace_params *t_params,
					struct cs_etm_auxtrace *etm, int idx)
{
	u64 **metadata = etm->metadata;

	t_params[idx].protocol = CS_ETM_PROTO_ETE;
	t_params[idx].ete.reg_idr0 = metadata[idx][CS_ETMV4_TRCIDR0];
	t_params[idx].ete.reg_idr1 = metadata[idx][CS_ETMV4_TRCIDR1];
	t_params[idx].ete.reg_idr2 = metadata[idx][CS_ETMV4_TRCIDR2];
	t_params[idx].ete.reg_idr8 = metadata[idx][CS_ETMV4_TRCIDR8];
	t_params[idx].ete.reg_configr = metadata[idx][CS_ETMV4_TRCCONFIGR];
	t_params[idx].ete.reg_traceidr = metadata[idx][CS_ETMV4_TRCTRACEIDR];
	t_params[idx].ete.reg_devarch = metadata[idx][CS_ETE_TRCDEVARCH];
}
477
/*
 * Initialize one set of decoder trace parameters per decoder, dispatching
 * on the per-CPU metadata magic (ETMv3/PTM, ETMv4 or ETE).
 *
 * Returns 0 on success, -EINVAL for an unrecognized magic value.
 */
static int cs_etm__init_trace_params(struct cs_etm_trace_params *t_params,
				     struct cs_etm_auxtrace *etm,
				     int decoders)
{
	int i;
	u32 etmidr;
	u64 architecture;

	for (i = 0; i < decoders; i++) {
		architecture = etm->metadata[i][CS_ETM_MAGIC];

		switch (architecture) {
		case __perf_cs_etmv3_magic:
			/* v7 needs ETMIDR to tell PTM and ETMv3 apart */
			etmidr = etm->metadata[i][CS_ETM_ETMIDR];
			cs_etm__set_trace_param_etmv3(t_params, etm, i, etmidr);
			break;
		case __perf_cs_etmv4_magic:
			cs_etm__set_trace_param_etmv4(t_params, etm, i);
			break;
		case __perf_cs_ete_magic:
			cs_etm__set_trace_param_ete(t_params, etm, i);
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}
507
508static int cs_etm__init_decoder_params(struct cs_etm_decoder_params *d_params,
509 struct cs_etm_queue *etmq,
510 enum cs_etm_decoder_operation mode,
511 bool formatted)
512{
513 int ret = -EINVAL;
514
515 if (!(mode < CS_ETM_OPERATION_MAX))
516 goto out;
517
518 d_params->packet_printer = cs_etm__packet_dump;
519 d_params->operation = mode;
520 d_params->data = etmq;
521 d_params->formatted = formatted;
522 d_params->fsyncs = false;
523 d_params->hsyncs = false;
524 d_params->frame_aligned = true;
525
526 ret = 0;
527out:
528 return ret;
529}
530
/*
 * Raw trace dump (-D / --dump-raw-trace): push the whole aux buffer
 * through the decoder; each decoded packet is printed by the registered
 * packet printer (cs_etm__packet_dump()).
 */
static void cs_etm__dump_event(struct cs_etm_queue *etmq,
			       struct auxtrace_buffer *buffer)
{
	int ret;
	const char *color = PERF_COLOR_BLUE;
	size_t buffer_used = 0;

	fprintf(stdout, "\n");
	color_fprintf(stdout, color,
		     ". ... CoreSight %s Trace data: size %zu bytes\n",
		     cs_etm_decoder__get_name(etmq->decoder), buffer->size);

	do {
		size_t consumed;

		/* Feed the remaining bytes; consumed reports progress. */
		ret = cs_etm_decoder__process_data_block(
				etmq->decoder, buffer->offset,
				&((u8 *)buffer->data)[buffer_used],
				buffer->size - buffer_used, &consumed);
		if (ret)
			break;

		buffer_used += consumed;
	} while (buffer_used < buffer->size);

	/* Leave the decoder in a clean state for the next buffer. */
	cs_etm_decoder__reset(etmq->decoder);
}
558
559static int cs_etm__flush_events(struct perf_session *session,
560 struct perf_tool *tool)
561{
562 struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
563 struct cs_etm_auxtrace,
564 auxtrace);
565 if (dump_trace)
566 return 0;
567
568 if (!tool->ordered_events)
569 return -EINVAL;
570
571 if (etm->timeless_decoding)
572 return cs_etm__process_timeless_queues(etm, -1);
573
574 return cs_etm__process_queues(etm);
575}
576
/*
 * Release every cs_etm_traceid_queue owned by @etmq along with the
 * intlist that maps trace IDs to array slots and the array itself.
 */
static void cs_etm__free_traceid_queues(struct cs_etm_queue *etmq)
{
	int idx;
	uintptr_t priv;
	struct int_node *inode, *tmp;
	struct cs_etm_traceid_queue *tidq;
	struct intlist *traceid_queues_list = etmq->traceid_queues_list;

	intlist__for_each_entry_safe(inode, tmp, traceid_queues_list) {
		priv = (uintptr_t)inode->priv;
		idx = priv;

		/* Free the trace ID queue stored at this slot. */
		tidq = etmq->traceid_queues[idx];
		thread__zput(tidq->thread);
		zfree(&tidq->event_buf);
		zfree(&tidq->last_branch);
		zfree(&tidq->last_branch_rb);
		zfree(&tidq->prev_packet);
		zfree(&tidq->packet);
		zfree(&tidq);

		/*
		 * Safe to remove while iterating thanks to the _safe
		 * variant; intlist__remove() also frees the node.
		 */
		intlist__remove(traceid_queues_list, inode);
	}

	/* The list itself (now empty). */
	intlist__delete(traceid_queues_list);
	etmq->traceid_queues_list = NULL;

	/* Finally the array of queue pointers. */
	zfree(&etmq->traceid_queues);
}
613
614static void cs_etm__free_queue(void *priv)
615{
616 struct cs_etm_queue *etmq = priv;
617
618 if (!etmq)
619 return;
620
621 cs_etm_decoder__free(etmq->decoder);
622 cs_etm__free_traceid_queues(etmq);
623 free(etmq);
624}
625
626static void cs_etm__free_events(struct perf_session *session)
627{
628 unsigned int i;
629 struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
630 struct cs_etm_auxtrace,
631 auxtrace);
632 struct auxtrace_queues *queues = &aux->queues;
633
634 for (i = 0; i < queues->nr_queues; i++) {
635 cs_etm__free_queue(queues->queue_array[i].priv);
636 queues->queue_array[i].priv = NULL;
637 }
638
639 auxtrace_queues__free(queues);
640}
641
/*
 * Session teardown: free the queues, the trace-ID metadata list, the
 * per-CPU metadata arrays and the session state itself.
 */
static void cs_etm__free(struct perf_session *session)
{
	int i;
	struct int_node *inode, *tmp;
	struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);
	cs_etm__free_events(session);
	session->auxtrace = NULL;

	/* Unlink (and free) each node before deleting the list itself. */
	intlist__for_each_entry_safe(inode, tmp, traceid_list)
		intlist__remove(traceid_list, inode);

	intlist__delete(traceid_list);

	for (i = 0; i < aux->num_cpu; i++)
		zfree(&aux->metadata[i]);

	thread__zput(aux->unknown_thread);
	zfree(&aux->metadata);
	zfree(&aux);
}
665
666static bool cs_etm__evsel_is_auxtrace(struct perf_session *session,
667 struct evsel *evsel)
668{
669 struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
670 struct cs_etm_auxtrace,
671 auxtrace);
672
673 return evsel->core.attr.type == aux->pmu_type;
674}
675
676static u8 cs_etm__cpu_mode(struct cs_etm_queue *etmq, u64 address)
677{
678 struct machine *machine;
679
680 machine = etmq->etm->machine;
681
682 if (address >= machine__kernel_start(machine)) {
683 if (machine__is_host(machine))
684 return PERF_RECORD_MISC_KERNEL;
685 else
686 return PERF_RECORD_MISC_GUEST_KERNEL;
687 } else {
688 if (machine__is_host(machine))
689 return PERF_RECORD_MISC_USER;
690 else if (perf_guest)
691 return PERF_RECORD_MISC_GUEST_USER;
692 else
693 return PERF_RECORD_MISC_HYPERVISOR;
694 }
695}
696
/*
 * Decoder memory-access callback: copy up to @size bytes of traced
 * program text at @address into @buffer, resolving the address through
 * the thread's maps and the backing DSO.
 *
 * Returns the number of bytes read, or 0 when the address cannot be
 * serviced (no thread/map/DSO, or the DSO data is unavailable).
 */
static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u8 trace_chan_id,
			      u64 address, size_t size, u8 *buffer)
{
	u8 cpumode;
	u64 offset;
	int len;
	struct thread *thread;
	struct machine *machine;
	struct addr_location al;
	struct cs_etm_traceid_queue *tidq;

	if (!etmq)
		return 0;

	machine = etmq->etm->machine;
	cpumode = cs_etm__cpu_mode(etmq, address);
	tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
	if (!tidq)
		return 0;

	/*
	 * With no resolved thread, only kernel addresses can still be
	 * serviced, via the session's "unknown" placeholder thread.
	 */
	thread = tidq->thread;
	if (!thread) {
		if (cpumode != PERF_RECORD_MISC_KERNEL)
			return 0;
		thread = etmq->etm->unknown_thread;
	}

	if (!thread__find_map(thread, cpumode, address, &al) || !al.map->dso)
		return 0;

	/* Don't retry DSOs already known to fail for itrace reads. */
	if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
	    dso__data_status_seen(al.map->dso, DSO_DATA_STATUS_SEEN_ITRACE))
		return 0;

	/* Translate the memory address to an offset within the DSO. */
	offset = al.map->map_ip(al.map, address);

	map__load(al.map);

	len = dso__data_read_offset(al.map->dso, machine, offset, buffer, size);

	if (len <= 0) {
		/* Warn once globally, and once per DSO with the address. */
		ui__warning_once("CS ETM Trace: Missing DSO. Use 'perf archive' or debuginfod to export data from the traced system.\n"
				 " Enable CONFIG_PROC_KCORE or use option '-k /path/to/vmlinux' for kernel symbols.\n");
		if (!al.map->dso->auxtrace_warned) {
			pr_err("CS ETM Trace: Debug data not found for address %#"PRIx64" in %s\n",
				address,
				al.map->dso->long_name ? al.map->dso->long_name : "Unknown");
			al.map->dso->auxtrace_warned = true;
		}
		return 0;
	}

	return len;
}
751
752static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
753 bool formatted)
754{
755 struct cs_etm_decoder_params d_params;
756 struct cs_etm_trace_params *t_params = NULL;
757 struct cs_etm_queue *etmq;
758
759
760
761
762 int decoders = formatted ? etm->num_cpu : 1;
763
764 etmq = zalloc(sizeof(*etmq));
765 if (!etmq)
766 return NULL;
767
768 etmq->traceid_queues_list = intlist__new(NULL);
769 if (!etmq->traceid_queues_list)
770 goto out_free;
771
772
773 t_params = zalloc(sizeof(*t_params) * decoders);
774
775 if (!t_params)
776 goto out_free;
777
778 if (cs_etm__init_trace_params(t_params, etm, decoders))
779 goto out_free;
780
781
782 if (cs_etm__init_decoder_params(&d_params, etmq,
783 dump_trace ? CS_ETM_OPERATION_PRINT :
784 CS_ETM_OPERATION_DECODE,
785 formatted))
786 goto out_free;
787
788 etmq->decoder = cs_etm_decoder__new(decoders, &d_params,
789 t_params);
790
791 if (!etmq->decoder)
792 goto out_free;
793
794
795
796
797
798 if (cs_etm_decoder__add_mem_access_cb(etmq->decoder,
799 0x0L, ((u64) -1L),
800 cs_etm__mem_access))
801 goto out_free_decoder;
802
803 zfree(&t_params);
804 return etmq;
805
806out_free_decoder:
807 cs_etm_decoder__free(etmq->decoder);
808out_free:
809 intlist__delete(etmq->traceid_queues_list);
810 free(etmq);
811
812 return NULL;
813}
814
815static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
816 struct auxtrace_queue *queue,
817 unsigned int queue_nr,
818 bool formatted)
819{
820 struct cs_etm_queue *etmq = queue->priv;
821
822 if (list_empty(&queue->head) || etmq)
823 return 0;
824
825 etmq = cs_etm__alloc_queue(etm, formatted);
826
827 if (!etmq)
828 return -ENOMEM;
829
830 queue->priv = etmq;
831 etmq->etm = etm;
832 etmq->queue_nr = queue_nr;
833 etmq->offset = 0;
834
835 return 0;
836}
837
/*
 * Decode blocks from @etmq until the first CoreSight timestamp appears,
 * then insert the queue into the session heap keyed by that timestamp so
 * queues can be processed in time order.
 *
 * Returns 0 on success or when the queue has no more data, negative on
 * error.
 */
static int cs_etm__queue_first_cs_timestamp(struct cs_etm_auxtrace *etm,
					    struct cs_etm_queue *etmq,
					    unsigned int queue_nr)
{
	int ret = 0;
	unsigned int cs_queue_nr;
	u8 trace_chan_id;
	u64 cs_timestamp;

	/*
	 * Keep decoding one data block at a time until a timestamp is
	 * observed or the queue runs dry.
	 */
	while (1) {
		/*
		 * Fetch the next block of trace data; a return <= 0 means
		 * no more data (or an error), so stop here.
		 */
		ret = cs_etm__get_data_block(etmq);
		if (ret <= 0)
			goto out;

		/*
		 * Run the block through the decoder; timestamp elements,
		 * if any, are flagged on the queue as a side effect.
		 */
		ret = cs_etm__decode_data_block(etmq);
		if (ret)
			goto out;

		/*
		 * Check whether this block produced a timestamp and, if
		 * so, which trace stream it belongs to.
		 */
		cs_timestamp = cs_etm__etmq_get_timestamp(etmq, &trace_chan_id);

		/* We found a timestamp, no need to continue. */
		if (cs_timestamp)
			break;

		/*
		 * No timestamp in this block: the packets decoded so far
		 * are not anchored in time, so discard them before trying
		 * the next block.
		 */
		cs_etm__clear_all_packet_queues(etmq);
	}

	/*
	 * Encode both the queue number and the trace stream ID into the
	 * heap key so that, when popped, processing can resume on the
	 * exact stream that produced the timestamp.
	 */
	cs_queue_nr = TO_CS_QUEUE_NR(queue_nr, trace_chan_id);
	ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, cs_timestamp);
out:
	return ret;
}
908
/*
 * Linearize the last-branch ring buffer into tidq->last_branch so it can
 * be attached to a sample: newest entries first, oldest last.
 */
static inline
void cs_etm__copy_last_branch_rb(struct cs_etm_queue *etmq,
				 struct cs_etm_traceid_queue *tidq)
{
	struct branch_stack *bs_src = tidq->last_branch_rb;
	struct branch_stack *bs_dst = tidq->last_branch;
	size_t nr = 0;

	/*
	 * The entry count is the same whether or not the ring buffer has
	 * wrapped, so it can be copied up front.
	 */
	bs_dst->nr = bs_src->nr;

	/* Nothing recorded yet: leave the destination entries untouched. */
	if (!bs_src->nr)
		return;

	/*
	 * Entries are written backwards from last_branch_pos (see
	 * cs_etm__update_last_branch_rb()), so the newest records occupy
	 * [last_branch_pos .. last_branch_sz).  Copy that segment to the
	 * front of the destination.
	 */
	nr = etmq->etm->synth_opts.last_branch_sz - tidq->last_branch_pos;
	memcpy(&bs_dst->entries[0],
	       &bs_src->entries[tidq->last_branch_pos],
	       sizeof(struct branch_entry) * nr);

	/*
	 * When the ring buffer is full it has wrapped, and the older
	 * records sit in [0 .. last_branch_pos).  Append them after the
	 * segment copied above so the destination is ordered newest to
	 * oldest.
	 */
	if (bs_src->nr >= etmq->etm->synth_opts.last_branch_sz) {
		memcpy(&bs_dst->entries[nr],
		       &bs_src->entries[0],
		       sizeof(struct branch_entry) * tidq->last_branch_pos);
	}
}
952
953static inline
954void cs_etm__reset_last_branch_rb(struct cs_etm_traceid_queue *tidq)
955{
956 tidq->last_branch_pos = 0;
957 tidq->last_branch_rb->nr = 0;
958}
959
/*
 * Size (in bytes) of the T32 instruction at @addr: 2 or 4.
 */
static inline int cs_etm__t32_instr_size(struct cs_etm_queue *etmq,
					 u8 trace_chan_id, u64 addr)
{
	u8 instrBytes[2];

	/*
	 * Read the first halfword of the instruction.  NOTE(review): the
	 * return value is not checked, so instrBytes is used
	 * uninitialized when the read fails — confirm whether a fallback
	 * size should be used instead.
	 */
	cs_etm__mem_access(etmq, trace_chan_id, addr,
			   ARRAY_SIZE(instrBytes), instrBytes);
	/*
	 * T32 encodings where the top 5 bits of the first halfword are
	 * 0b11101/0b11110/0b11111 (i.e. (byte & 0xF8) >= 0xE8 on the
	 * high byte) are 32-bit; everything else is a 16-bit encoding.
	 */
	return ((instrBytes[1] & 0xF8) >= 0xE8) ? 4 : 2;
}
974
975static inline u64 cs_etm__first_executed_instr(struct cs_etm_packet *packet)
976{
977
978 if (packet->sample_type == CS_ETM_DISCONTINUITY)
979 return 0;
980
981 return packet->start_addr;
982}
983
984static inline
985u64 cs_etm__last_executed_instr(const struct cs_etm_packet *packet)
986{
987
988 if (packet->sample_type == CS_ETM_DISCONTINUITY)
989 return 0;
990
991 return packet->end_addr - packet->last_instr_size;
992}
993
994static inline u64 cs_etm__instr_addr(struct cs_etm_queue *etmq,
995 u64 trace_chan_id,
996 const struct cs_etm_packet *packet,
997 u64 offset)
998{
999 if (packet->isa == CS_ETM_ISA_T32) {
1000 u64 addr = packet->start_addr;
1001
1002 while (offset) {
1003 addr += cs_etm__t32_instr_size(etmq,
1004 trace_chan_id, addr);
1005 offset--;
1006 }
1007 return addr;
1008 }
1009
1010
1011 return packet->start_addr + offset * 4;
1012}
1013
/*
 * Record one branch (prev_packet's last instruction -> packet's first
 * instruction) in the last-branch ring buffer.  Entries are written
 * backwards so the newest record always sits at last_branch_pos.
 */
static void cs_etm__update_last_branch_rb(struct cs_etm_queue *etmq,
					  struct cs_etm_traceid_queue *tidq)
{
	struct branch_stack *bs = tidq->last_branch_rb;
	struct branch_entry *be;

	/*
	 * Wrap the write position back to the top of the buffer when it
	 * reaches 0, then step down to the slot for this record.
	 */
	if (!tidq->last_branch_pos)
		tidq->last_branch_pos = etmq->etm->synth_opts.last_branch_sz;

	tidq->last_branch_pos -= 1;

	be = &bs->entries[tidq->last_branch_pos];
	be->from = cs_etm__last_executed_instr(tidq->prev_packet);
	be->to = cs_etm__first_executed_instr(tidq->packet);
	/* No support for mispredict */
	be->flags.mispred = 0;
	be->flags.predicted = 1;

	/*
	 * Grow the count until the buffer is full; after that, old
	 * records are simply overwritten in place.
	 */
	if (bs->nr < etmq->etm->synth_opts.last_branch_sz)
		bs->nr += 1;
}
1045
/*
 * Rewrite @event's header size for the given sample type and serialize
 * @sample into it, so the event can be re-injected into the stream.
 */
static int cs_etm__inject_event(union perf_event *event,
			       struct perf_sample *sample, u64 type)
{
	event->header.size = perf_event__sample_event_size(sample, type, 0);
	return perf_event__synthesize_sample(event, type, 0, sample);
}
1052
1053
/*
 * Advance @etmq to its next aux buffer, faulting the data in from the
 * perf.data file when it is not already mapped, and dropping the data of
 * the buffer just consumed.
 *
 * Returns the new buffer's length, 0 when the queue is exhausted, or
 * -ENOMEM when the data cannot be loaded.
 */
static int
cs_etm__get_trace(struct cs_etm_queue *etmq)
{
	struct auxtrace_buffer *aux_buffer = etmq->buffer;
	struct auxtrace_buffer *old_buffer = aux_buffer;
	struct auxtrace_queue *queue;

	queue = &etmq->etm->queues.queue_array[etmq->queue_nr];

	aux_buffer = auxtrace_buffer__next(queue, aux_buffer);

	/* If no more data, drop the previous buffer and return */
	if (!aux_buffer) {
		if (old_buffer)
			auxtrace_buffer__drop_data(old_buffer);
		etmq->buf_len = 0;
		return 0;
	}

	etmq->buffer = aux_buffer;

	/* If the buffer contains no data, load it from the file. */
	if (!aux_buffer->data) {
		/* get the file desc associated with the perf data file */
		int fd = perf_data__fd(etmq->etm->session->data);

		aux_buffer->data = auxtrace_buffer__get_data(aux_buffer, fd);
		if (!aux_buffer->data)
			return -ENOMEM;
	}

	/* The old buffer's data is no longer needed. */
	if (old_buffer)
		auxtrace_buffer__drop_data(old_buffer);

	etmq->buf_used = 0;
	etmq->buf_len = aux_buffer->size;
	etmq->buf = aux_buffer->data;

	return etmq->buf_len;
}
1095
1096static void cs_etm__set_pid_tid_cpu(struct cs_etm_auxtrace *etm,
1097 struct cs_etm_traceid_queue *tidq)
1098{
1099 if ((!tidq->thread) && (tidq->tid != -1))
1100 tidq->thread = machine__find_thread(etm->machine, -1,
1101 tidq->tid);
1102
1103 if (tidq->thread)
1104 tidq->pid = tidq->thread->pid_;
1105}
1106
1107int cs_etm__etmq_set_tid(struct cs_etm_queue *etmq,
1108 pid_t tid, u8 trace_chan_id)
1109{
1110 int cpu, err = -EINVAL;
1111 struct cs_etm_auxtrace *etm = etmq->etm;
1112 struct cs_etm_traceid_queue *tidq;
1113
1114 tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
1115 if (!tidq)
1116 return err;
1117
1118 if (cs_etm__get_cpu(trace_chan_id, &cpu) < 0)
1119 return err;
1120
1121 err = machine__set_current_tid(etm->machine, cpu, tid, tid);
1122 if (err)
1123 return err;
1124
1125 tidq->tid = tid;
1126 thread__zput(tidq->thread);
1127
1128 cs_etm__set_pid_tid_cpu(etm, tidq);
1129 return 0;
1130}
1131
1132bool cs_etm__etmq_is_timeless(struct cs_etm_queue *etmq)
1133{
1134 return !!etmq->etm->timeless_decoding;
1135}
1136
/*
 * Fill sample->insn/insn_len with the bytes of the instruction at
 * sample->ip, taken from @packet's stream.
 */
static void cs_etm__copy_insn(struct cs_etm_queue *etmq,
			      u64 trace_chan_id,
			      const struct cs_etm_packet *packet,
			      struct perf_sample *sample)
{
	/*
	 * A discontinuity carries no executed instruction, so there is
	 * nothing to copy.
	 */
	if (packet->sample_type == CS_ETM_DISCONTINUITY) {
		sample->insn_len = 0;
		return;
	}

	/*
	 * T32 instructions are 2 or 4 bytes and must be sized by reading
	 * the opcode; A32/A64 instructions are always 4 bytes.
	 */
	if (packet->isa == CS_ETM_ISA_T32)
		sample->insn_len = cs_etm__t32_instr_size(etmq, trace_chan_id,
							  sample->ip);

	else
		sample->insn_len = 4;

	/* Copy the instruction bytes from the traced program image. */
	cs_etm__mem_access(etmq, trace_chan_id, sample->ip,
			   sample->insn_len, (void *)sample->insn);
}
1165
/*
 * Build and deliver one synthesized instruction sample at @addr covering
 * @period instructions.  Returns 0 on success or the delivery error.
 */
static int cs_etm__synth_instruction_sample(struct cs_etm_queue *etmq,
					    struct cs_etm_traceid_queue *tidq,
					    u64 addr, u64 period)
{
	int ret = 0;
	struct cs_etm_auxtrace *etm = etmq->etm;
	union perf_event *event = tidq->event_buf;
	struct perf_sample sample = {.ip = 0,};

	event->sample.header.type = PERF_RECORD_SAMPLE;
	event->sample.header.misc = cs_etm__cpu_mode(etmq, addr);
	event->sample.header.size = sizeof(struct perf_event_header);

	/* Timestamps only make sense outside timeless decoding. */
	if (!etm->timeless_decoding)
		sample.time = etm->latest_kernel_timestamp;
	sample.ip = addr;
	sample.pid = tidq->pid;
	sample.tid = tidq->tid;
	sample.id = etmq->etm->instructions_id;
	sample.stream_id = etmq->etm->instructions_id;
	sample.period = period;
	sample.cpu = tidq->packet->cpu;
	/* Flags describe the previously completed packet. */
	sample.flags = tidq->prev_packet->flags;
	sample.cpumode = event->sample.header.misc;

	cs_etm__copy_insn(etmq, tidq->trace_chan_id, tidq->packet, &sample);

	if (etm->synth_opts.last_branch)
		sample.branch_stack = tidq->last_branch;

	/* Re-serialize the event when samples are being injected. */
	if (etm->synth_opts.inject) {
		ret = cs_etm__inject_event(event, &sample,
					   etm->instructions_sample_type);
		if (ret)
			return ret;
	}

	ret = perf_session__deliver_synth_event(etm->session, event, &sample);

	if (ret)
		pr_err(
			"CS ETM Trace: failed to deliver instruction event, error %d\n",
			ret);

	return ret;
}
1212
1213
1214
1215
1216
1217static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq,
1218 struct cs_etm_traceid_queue *tidq)
1219{
1220 int ret = 0;
1221 struct cs_etm_auxtrace *etm = etmq->etm;
1222 struct perf_sample sample = {.ip = 0,};
1223 union perf_event *event = tidq->event_buf;
1224 struct dummy_branch_stack {
1225 u64 nr;
1226 u64 hw_idx;
1227 struct branch_entry entries;
1228 } dummy_bs;
1229 u64 ip;
1230
1231 ip = cs_etm__last_executed_instr(tidq->prev_packet);
1232
1233 event->sample.header.type = PERF_RECORD_SAMPLE;
1234 event->sample.header.misc = cs_etm__cpu_mode(etmq, ip);
1235 event->sample.header.size = sizeof(struct perf_event_header);
1236
1237 if (!etm->timeless_decoding)
1238 sample.time = etm->latest_kernel_timestamp;
1239 sample.ip = ip;
1240 sample.pid = tidq->pid;
1241 sample.tid = tidq->tid;
1242 sample.addr = cs_etm__first_executed_instr(tidq->packet);
1243 sample.id = etmq->etm->branches_id;
1244 sample.stream_id = etmq->etm->branches_id;
1245 sample.period = 1;
1246 sample.cpu = tidq->packet->cpu;
1247 sample.flags = tidq->prev_packet->flags;
1248 sample.cpumode = event->sample.header.misc;
1249
1250 cs_etm__copy_insn(etmq, tidq->trace_chan_id, tidq->prev_packet,
1251 &sample);
1252
1253
1254
1255
1256 if (etm->synth_opts.last_branch) {
1257 dummy_bs = (struct dummy_branch_stack){
1258 .nr = 1,
1259 .hw_idx = -1ULL,
1260 .entries = {
1261 .from = sample.ip,
1262 .to = sample.addr,
1263 },
1264 };
1265 sample.branch_stack = (struct branch_stack *)&dummy_bs;
1266 }
1267
1268 if (etm->synth_opts.inject) {
1269 ret = cs_etm__inject_event(event, &sample,
1270 etm->branches_sample_type);
1271 if (ret)
1272 return ret;
1273 }
1274
1275 ret = perf_session__deliver_synth_event(etm->session, event, &sample);
1276
1277 if (ret)
1278 pr_err(
1279 "CS ETM Trace: failed to deliver instruction event, error %d\n",
1280 ret);
1281
1282 return ret;
1283}
1284
/* Glue used to route synthesized attr events back into their session. */
struct cs_etm_synth {
	struct perf_tool dummy_tool;	/* passed to the synthesize API */
	struct perf_session *session;	/* destination of the events */
};
1289
1290static int cs_etm__event_synth(struct perf_tool *tool,
1291 union perf_event *event,
1292 struct perf_sample *sample __maybe_unused,
1293 struct machine *machine __maybe_unused)
1294{
1295 struct cs_etm_synth *cs_etm_synth =
1296 container_of(tool, struct cs_etm_synth, dummy_tool);
1297
1298 return perf_session__deliver_synth_event(cs_etm_synth->session,
1299 event, NULL);
1300}
1301
1302static int cs_etm__synth_event(struct perf_session *session,
1303 struct perf_event_attr *attr, u64 id)
1304{
1305 struct cs_etm_synth cs_etm_synth;
1306
1307 memset(&cs_etm_synth, 0, sizeof(struct cs_etm_synth));
1308 cs_etm_synth.session = session;
1309
1310 return perf_event__synthesize_attr(&cs_etm_synth.dummy_tool, attr, 1,
1311 &id, cs_etm__event_synth);
1312}
1313
/*
 * Create the synthetic branch/instruction events that decoded trace will
 * be reported against, deriving their attributes from the first evsel
 * that matches the CoreSight PMU type.  The chosen sample types and
 * event IDs are recorded in @etm for use at synthesis time.
 */
static int cs_etm__synth_events(struct cs_etm_auxtrace *etm,
				struct perf_session *session)
{
	struct evlist *evlist = session->evlist;
	struct evsel *evsel;
	struct perf_event_attr attr;
	bool found = false;
	u64 id;
	int err;

	/* Find the evsel associated with the CoreSight PMU */
	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == etm->pmu_type) {
			found = true;
			break;
		}
	}

	if (!found) {
		pr_debug("No selected events with CoreSight Trace data\n");
		return 0;
	}

	/* Base the synthetic attr on the CoreSight evsel's attr */
	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.size = sizeof(struct perf_event_attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.sample_type = evsel->core.attr.sample_type & PERF_SAMPLE_MASK;
	attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
			    PERF_SAMPLE_PERIOD;
	/* Timeless decoding has no usable timestamps, so drop TIME */
	if (etm->timeless_decoding)
		attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
	else
		attr.sample_type |= PERF_SAMPLE_TIME;

	attr.exclude_user = evsel->core.attr.exclude_user;
	attr.exclude_kernel = evsel->core.attr.exclude_kernel;
	attr.exclude_hv = evsel->core.attr.exclude_hv;
	attr.exclude_host = evsel->core.attr.exclude_host;
	attr.exclude_guest = evsel->core.attr.exclude_guest;
	attr.sample_id_all = evsel->core.attr.sample_id_all;
	attr.read_format = evsel->core.attr.read_format;

	/* Derive a new id at a fixed offset from the evsel's first id */
	id = evsel->core.id[0] + 1000000000;
	/* Avoid an id of 0 should the addition have wrapped */
	if (!id)
		id = 1;

	if (etm->synth_opts.branches) {
		attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
		attr.sample_period = 1;
		attr.sample_type |= PERF_SAMPLE_ADDR;
		err = cs_etm__synth_event(session, &attr, id);
		if (err)
			return err;
		etm->sample_branches = true;
		etm->branches_sample_type = attr.sample_type;
		etm->branches_id = id;
		id += 1;
		/* ADDR only applies to branch samples, remove it again */
		attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
	}

	if (etm->synth_opts.last_branch) {
		attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
		/*
		 * We don't use the hardware index, but the sample generation
		 * code uses the new format branch_stack with this field,
		 * so the event attributes must indicate that it's present.
		 */
		attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX;
	}

	if (etm->synth_opts.instructions) {
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		attr.sample_period = etm->synth_opts.period;
		etm->instructions_sample_period = attr.sample_period;
		err = cs_etm__synth_event(session, &attr, id);
		if (err)
			return err;
		etm->sample_instructions = true;
		etm->instructions_sample_type = attr.sample_type;
		etm->instructions_id = id;
		id += 1;
	}

	return 0;
}
1400
/*
 * Generate the instruction and/or branch samples owed for the RANGE
 * packet just pulled off the traceid queue, then swap packet/prev_packet
 * so the current packet becomes the "previous" one for the next round.
 */
static int cs_etm__sample(struct cs_etm_queue *etmq,
			  struct cs_etm_traceid_queue *tidq)
{
	struct cs_etm_auxtrace *etm = etmq->etm;
	int ret;
	u8 trace_chan_id = tidq->trace_chan_id;
	u64 instrs_prev;

	/* Remember the instruction count carried over from earlier packets */
	instrs_prev = tidq->period_instructions;

	tidq->period_instructions += tidq->packet->instr_count;

	/*
	 * Record a branch in the last-branch ring buffer when the last
	 * instruction in the previous range packet was a taken branch.
	 */
	if (etm->synth_opts.last_branch &&
	    tidq->prev_packet->sample_type == CS_ETM_RANGE &&
	    tidq->prev_packet->last_instr_taken_branch)
		cs_etm__update_last_branch_rb(etmq, tidq);

	if (etm->sample_instructions &&
	    tidq->period_instructions >= etm->instructions_sample_period) {
		/*
		 * Emit instruction samples periodically.
		 *
		 * The accumulated instruction count has crossed the sample
		 * period: emit one sample per full period, each attributed
		 * to the instruction that ends its period.  "offset" is the
		 * distance (in instructions) from the start of the current
		 * packet to the first sample point and then advances by one
		 * period per emitted sample.  Whatever remains below a full
		 * period stays in period_instructions as the carry-over for
		 * the next packet.
		 */
		u64 offset = etm->instructions_sample_period - instrs_prev;
		u64 addr;

		/* Prepare last branches for the instruction sample */
		if (etm->synth_opts.last_branch)
			cs_etm__copy_last_branch_rb(etmq, tidq);

		while (tidq->period_instructions >=
				etm->instructions_sample_period) {
			/*
			 * Calculate the address of the sampled instruction
			 * (-1 as the sample is reported as though the
			 * instruction has just been executed, but the PC has
			 * not advanced to the next instruction yet).
			 */
			addr = cs_etm__instr_addr(etmq, trace_chan_id,
						  tidq->packet, offset - 1);
			ret = cs_etm__synth_instruction_sample(
				etmq, tidq, addr,
				etm->instructions_sample_period);
			if (ret)
				return ret;

			offset += etm->instructions_sample_period;
			tidq->period_instructions -=
				etm->instructions_sample_period;
		}
	}

	if (etm->sample_branches) {
		bool generate_sample = false;

		/* Generate sample for start-of-tracing packet */
		if (tidq->prev_packet->sample_type == CS_ETM_DISCONTINUITY)
			generate_sample = true;

		/* Generate sample for a taken-branch packet */
		if (tidq->prev_packet->sample_type == CS_ETM_RANGE &&
		    tidq->prev_packet->last_instr_taken_branch)
			generate_sample = true;

		if (generate_sample) {
			ret = cs_etm__synth_branch_sample(etmq, tidq);
			if (ret)
				return ret;
		}
	}

	cs_etm__packet_swap(etm, tidq);

	return 0;
}
1529
1530static int cs_etm__exception(struct cs_etm_traceid_queue *tidq)
1531{
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543 if (tidq->prev_packet->sample_type == CS_ETM_RANGE)
1544 tidq->prev_packet->last_instr_taken_branch = true;
1545
1546 return 0;
1547}
1548
/*
 * Flush pending samples for the previous range packet (used on trace
 * discontinuities), then swap packets and reset the last-branch state.
 */
static int cs_etm__flush(struct cs_etm_queue *etmq,
			 struct cs_etm_traceid_queue *tidq)
{
	int err = 0;
	struct cs_etm_auxtrace *etm = etmq->etm;

	/* Nothing to flush at the very start of tracing */
	if (tidq->prev_packet->sample_type == CS_ETM_EMPTY)
		goto swap_packet;

	if (etmq->etm->synth_opts.last_branch &&
	    tidq->prev_packet->sample_type == CS_ETM_RANGE) {
		u64 addr;

		/* Prepare last branches for the instruction sample */
		cs_etm__copy_last_branch_rb(etmq, tidq);

		/*
		 * Generate a last-branch instruction sample for the entries
		 * left in the circular buffer at the end of the trace.
		 *
		 * Use the address of the end of the last reported execution
		 * range.
		 */
		addr = cs_etm__last_executed_instr(tidq->prev_packet);

		err = cs_etm__synth_instruction_sample(
			etmq, tidq, addr,
			tidq->period_instructions);
		if (err)
			return err;

		tidq->period_instructions = 0;

	}

	if (etm->sample_branches &&
	    tidq->prev_packet->sample_type == CS_ETM_RANGE) {
		err = cs_etm__synth_branch_sample(etmq, tidq);
		if (err)
			return err;
	}

swap_packet:
	cs_etm__packet_swap(etm, tidq);

	/* Reset last branches after flushing the trace */
	if (etm->synth_opts.last_branch)
		cs_etm__reset_last_branch_rb(tidq);

	return err;
}
1601
/*
 * Called at the end of a data block when no new packet is coming.
 * 'tidq->packet' still holds a stale packet left over from the previous
 * swap, so no branch sample is generated here; only the last-branch
 * circular buffer is flushed as a final instruction sample.
 */
static int cs_etm__end_block(struct cs_etm_queue *etmq,
			     struct cs_etm_traceid_queue *tidq)
{
	int err;

	if (etmq->etm->synth_opts.last_branch &&
	    tidq->prev_packet->sample_type == CS_ETM_RANGE) {
		u64 addr;

		/* Prepare last branches for the instruction sample */
		cs_etm__copy_last_branch_rb(etmq, tidq);

		/*
		 * Use the address of the end of the last reported execution
		 * range for the final sample.
		 */
		addr = cs_etm__last_executed_instr(tidq->prev_packet);

		err = cs_etm__synth_instruction_sample(
			etmq, tidq, addr,
			tidq->period_instructions);
		if (err)
			return err;

		tidq->period_instructions = 0;
	}

	return 0;
}
1640
1641
1642
1643
1644
1645
1646
1647static int cs_etm__get_data_block(struct cs_etm_queue *etmq)
1648{
1649 int ret;
1650
1651 if (!etmq->buf_len) {
1652 ret = cs_etm__get_trace(etmq);
1653 if (ret <= 0)
1654 return ret;
1655
1656
1657
1658
1659 ret = cs_etm_decoder__reset(etmq->decoder);
1660 if (ret)
1661 return ret;
1662 }
1663
1664 return etmq->buf_len;
1665}
1666
/*
 * Determine whether the instruction ending at @end_addr is an SVC
 * (supervisor call) by reading it back from the traced program image
 * and matching the encoding for the packet's ISA.
 */
static bool cs_etm__is_svc_instr(struct cs_etm_queue *etmq, u8 trace_chan_id,
				 struct cs_etm_packet *packet,
				 u64 end_addr)
{
	/* Initialized in case cs_etm__mem_access() cannot read the bytes */
	u16 instr16 = 0;
	u32 instr32 = 0;
	u64 addr;

	switch (packet->isa) {
	case CS_ETM_ISA_T32:
		/*
		 * T32 SVC encoding (16-bit):
		 *
		 *  b'15         b'8
		 * +-----------------+--------+
		 * | 1 1 0 1 1 1 1 1 |  imm8  |
		 * +-----------------+--------+
		 *
		 * SVC is only defined as a 16-bit T32 instruction, so read
		 * back 2 bytes ending at end_addr.
		 */
		addr = end_addr - 2;
		cs_etm__mem_access(etmq, trace_chan_id, addr,
				   sizeof(instr16), (u8 *)&instr16);
		if ((instr16 & 0xFF00) == 0xDF00)
			return true;

		break;
	case CS_ETM_ISA_A32:
		/*
		 * A32 SVC encoding (32-bit): bits [27:24] are b'1111 and the
		 * condition field [31:28] must not be b'1111 (which encodes
		 * unconditional instructions, not SVC).
		 */
		addr = end_addr - 4;
		cs_etm__mem_access(etmq, trace_chan_id, addr,
				   sizeof(instr32), (u8 *)&instr32);
		if ((instr32 & 0x0F000000) == 0x0F000000 &&
		    (instr32 & 0xF0000000) != 0xF0000000)
			return true;

		break;
	case CS_ETM_ISA_A64:
		/*
		 * A64 SVC encoding (32-bit):
		 * b'1101 0100 000' imm16 b'0 0001
		 * i.e. masked with 0xFFE0001F the instruction is 0xd4000001.
		 */
		addr = end_addr - 4;
		cs_etm__mem_access(etmq, trace_chan_id, addr,
				   sizeof(instr32), (u8 *)&instr32);
		if ((instr32 & 0xFFE0001F) == 0xd4000001)
			return true;

		break;
	case CS_ETM_ISA_UNKNOWN:
	default:
		break;
	}

	return false;
}
1737
1738static bool cs_etm__is_syscall(struct cs_etm_queue *etmq,
1739 struct cs_etm_traceid_queue *tidq, u64 magic)
1740{
1741 u8 trace_chan_id = tidq->trace_chan_id;
1742 struct cs_etm_packet *packet = tidq->packet;
1743 struct cs_etm_packet *prev_packet = tidq->prev_packet;
1744
1745 if (magic == __perf_cs_etmv3_magic)
1746 if (packet->exception_number == CS_ETMV3_EXC_SVC)
1747 return true;
1748
1749
1750
1751
1752
1753
1754 if (magic == __perf_cs_etmv4_magic) {
1755 if (packet->exception_number == CS_ETMV4_EXC_CALL &&
1756 cs_etm__is_svc_instr(etmq, trace_chan_id, prev_packet,
1757 prev_packet->end_addr))
1758 return true;
1759 }
1760
1761 return false;
1762}
1763
1764static bool cs_etm__is_async_exception(struct cs_etm_traceid_queue *tidq,
1765 u64 magic)
1766{
1767 struct cs_etm_packet *packet = tidq->packet;
1768
1769 if (magic == __perf_cs_etmv3_magic)
1770 if (packet->exception_number == CS_ETMV3_EXC_DEBUG_HALT ||
1771 packet->exception_number == CS_ETMV3_EXC_ASYNC_DATA_ABORT ||
1772 packet->exception_number == CS_ETMV3_EXC_PE_RESET ||
1773 packet->exception_number == CS_ETMV3_EXC_IRQ ||
1774 packet->exception_number == CS_ETMV3_EXC_FIQ)
1775 return true;
1776
1777 if (magic == __perf_cs_etmv4_magic)
1778 if (packet->exception_number == CS_ETMV4_EXC_RESET ||
1779 packet->exception_number == CS_ETMV4_EXC_DEBUG_HALT ||
1780 packet->exception_number == CS_ETMV4_EXC_SYSTEM_ERROR ||
1781 packet->exception_number == CS_ETMV4_EXC_INST_DEBUG ||
1782 packet->exception_number == CS_ETMV4_EXC_DATA_DEBUG ||
1783 packet->exception_number == CS_ETMV4_EXC_IRQ ||
1784 packet->exception_number == CS_ETMV4_EXC_FIQ)
1785 return true;
1786
1787 return false;
1788}
1789
/*
 * Decide whether the current exception packet was triggered by a
 * synchronous cause (trap, fault, alignment error, non-SVC call, ...).
 */
static bool cs_etm__is_sync_exception(struct cs_etm_queue *etmq,
				      struct cs_etm_traceid_queue *tidq,
				      u64 magic)
{
	u8 trace_chan_id = tidq->trace_chan_id;
	struct cs_etm_packet *packet = tidq->packet;
	struct cs_etm_packet *prev_packet = tidq->prev_packet;

	if (magic == __perf_cs_etmv3_magic)
		if (packet->exception_number == CS_ETMV3_EXC_SMC ||
		    packet->exception_number == CS_ETMV3_EXC_HYP ||
		    packet->exception_number == CS_ETMV3_EXC_JAZELLE_THUMBEE ||
		    packet->exception_number == CS_ETMV3_EXC_UNDEFINED_INSTR ||
		    packet->exception_number == CS_ETMV3_EXC_PREFETCH_ABORT ||
		    packet->exception_number == CS_ETMV3_EXC_DATA_FAULT ||
		    packet->exception_number == CS_ETMV3_EXC_GENERIC)
			return true;

	if (magic == __perf_cs_etmv4_magic) {
		if (packet->exception_number == CS_ETMV4_EXC_TRAP ||
		    packet->exception_number == CS_ETMV4_EXC_ALIGNMENT ||
		    packet->exception_number == CS_ETMV4_EXC_INST_FAULT ||
		    packet->exception_number == CS_ETMV4_EXC_DATA_FAULT)
			return true;

		/*
		 * For CS_ETMV4_EXC_CALL, instructions other than SVC
		 * (i.e. SMC, HVC) are taken as synchronous exceptions.
		 */
		if (packet->exception_number == CS_ETMV4_EXC_CALL &&
		    !cs_etm__is_svc_instr(etmq, trace_chan_id, prev_packet,
					  prev_packet->end_addr))
			return true;

		/*
		 * ETMv4 has 5 bits for the exception number; numbers in the
		 * range (CS_ETMV4_EXC_FIQ, CS_ETMV4_EXC_END] are
		 * implementation defined exceptions.
		 *
		 * For this case, simply treat them as synchronous.
		 */
		if (packet->exception_number > CS_ETMV4_EXC_FIQ &&
		    packet->exception_number <= CS_ETMV4_EXC_END)
			return true;
	}

	return false;
}
1838
/*
 * Translate the decoder's packet classification (instruction type and
 * subtype, exception numbers, discontinuities) into perf sample flags on
 * the current and/or previous packet.  Must run before the packets are
 * swapped by the sample/flush handlers.
 */
static int cs_etm__set_sample_flags(struct cs_etm_queue *etmq,
				    struct cs_etm_traceid_queue *tidq)
{
	struct cs_etm_packet *packet = tidq->packet;
	struct cs_etm_packet *prev_packet = tidq->prev_packet;
	u8 trace_chan_id = tidq->trace_chan_id;
	u64 magic;
	int ret;

	switch (packet->sample_type) {
	case CS_ETM_RANGE:
		/*
		 * Immediate branch instruction with neither link nor
		 * return flag: a normal branch within the function.
		 */
		if (packet->last_instr_type == OCSD_INSTR_BR &&
		    packet->last_instr_subtype == OCSD_S_INSTR_NONE) {
			packet->flags = PERF_IP_FLAG_BRANCH;

			if (packet->last_instr_cond)
				packet->flags |= PERF_IP_FLAG_CONDITIONAL;
		}

		/*
		 * Immediate branch instruction with link (e.g. BL): a
		 * branch used for a function call.
		 */
		if (packet->last_instr_type == OCSD_INSTR_BR &&
		    packet->last_instr_subtype == OCSD_S_INSTR_BR_LINK)
			packet->flags = PERF_IP_FLAG_BRANCH |
					PERF_IP_FLAG_CALL;

		/*
		 * Indirect branch instruction with link (e.g. BLR): a
		 * branch used for a function call.
		 */
		if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
		    packet->last_instr_subtype == OCSD_S_INSTR_BR_LINK)
			packet->flags = PERF_IP_FLAG_BRANCH |
					PERF_IP_FLAG_CALL;

		/*
		 * Indirect branch with subtype OCSD_S_INSTR_V7_IMPLIED_RET:
		 * an explicit hint that this is a function return.
		 */
		if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
		    packet->last_instr_subtype == OCSD_S_INSTR_V7_IMPLIED_RET)
			packet->flags = PERF_IP_FLAG_BRANCH |
					PERF_IP_FLAG_RETURN;

		/*
		 * Indirect branch instruction without link (e.g. BR):
		 * usually used for a function return.
		 */
		if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
		    packet->last_instr_subtype == OCSD_S_INSTR_NONE)
			packet->flags = PERF_IP_FLAG_BRANCH |
					PERF_IP_FLAG_RETURN;

		/* Return instruction for A64 (RET) */
		if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
		    packet->last_instr_subtype == OCSD_S_INSTR_V8_RET)
			packet->flags = PERF_IP_FLAG_BRANCH |
					PERF_IP_FLAG_RETURN;

		/*
		 * If the previous packet is a discontinuity, this range
		 * packet marks the resumption of trace; tag the previous
		 * packet as the beginning of trace.
		 */
		if (prev_packet->sample_type == CS_ETM_DISCONTINUITY)
			prev_packet->flags |= PERF_IP_FLAG_BRANCH |
					      PERF_IP_FLAG_TRACE_BEGIN;

		/*
		 * If the previous packet was an exception return and this
		 * range starts with an SVC instruction, the return was from
		 * a system call rather than an interrupt: patch the
		 * previous packet's flags accordingly.
		 */
		if (prev_packet->flags == (PERF_IP_FLAG_BRANCH |
					   PERF_IP_FLAG_RETURN |
					   PERF_IP_FLAG_INTERRUPT) &&
		    cs_etm__is_svc_instr(etmq, trace_chan_id,
					 packet, packet->start_addr))
			prev_packet->flags = PERF_IP_FLAG_BRANCH |
					     PERF_IP_FLAG_RETURN |
					     PERF_IP_FLAG_SYSCALLRET;
		break;
	case CS_ETM_DISCONTINUITY:
		/*
		 * The trace is discontinuous: if the previous packet is an
		 * instruction range, flag it as the end of trace.
		 */
		if (prev_packet->sample_type == CS_ETM_RANGE)
			prev_packet->flags |= PERF_IP_FLAG_BRANCH |
					      PERF_IP_FLAG_TRACE_END;
		break;
	case CS_ETM_EXCEPTION:
		ret = cs_etm__get_magic(packet->trace_chan_id, &magic);
		if (ret)
			return ret;

		/* The exception is a system call */
		if (cs_etm__is_syscall(etmq, tidq, magic))
			packet->flags = PERF_IP_FLAG_BRANCH |
					PERF_IP_FLAG_CALL |
					PERF_IP_FLAG_SYSCALLRET;
		/*
		 * The exception was triggered by an external signal:
		 * interrupt controller, debug module, PE reset or halt.
		 */
		else if (cs_etm__is_async_exception(tidq, magic))
			packet->flags = PERF_IP_FLAG_BRANCH |
					PERF_IP_FLAG_CALL |
					PERF_IP_FLAG_ASYNC |
					PERF_IP_FLAG_INTERRUPT;
		/*
		 * Otherwise the exception was caused by a trap, an
		 * instruction/data fault, or an alignment error.
		 */
		else if (cs_etm__is_sync_exception(etmq, tidq, magic))
			packet->flags = PERF_IP_FLAG_BRANCH |
					PERF_IP_FLAG_CALL |
					PERF_IP_FLAG_INTERRUPT;

		/*
		 * An exception packet is not used standalone for sample
		 * generation; it belongs with the previous instruction
		 * range packet, so copy its flags to the previous range
		 * packet to tell perf it is an exception-taken branch.
		 */
		if (prev_packet->sample_type == CS_ETM_RANGE)
			prev_packet->flags = packet->flags;
		break;
	case CS_ETM_EXCEPTION_RET:
		/*
		 * An exception return packet is likewise affiliated with
		 * the previous instruction range packet, so set the
		 * previous range packet's flags to mark it as an exception
		 * return branch.
		 *
		 * The return can be from either a system call or an
		 * interrupt, but the packet carries no exception number to
		 * distinguish the two.  Flag it as an interrupt return
		 * here; the CS_ETM_RANGE handler above patches it to a
		 * syscall return if the return address turns out to follow
		 * an SVC instruction.
		 */
		if (prev_packet->sample_type == CS_ETM_RANGE)
			prev_packet->flags = PERF_IP_FLAG_BRANCH |
					     PERF_IP_FLAG_RETURN |
					     PERF_IP_FLAG_INTERRUPT;
		break;
	case CS_ETM_EMPTY:
	default:
		break;
	}

	return 0;
}
2017
2018static int cs_etm__decode_data_block(struct cs_etm_queue *etmq)
2019{
2020 int ret = 0;
2021 size_t processed = 0;
2022
2023
2024
2025
2026
2027
2028
2029
2030 ret = cs_etm_decoder__process_data_block(etmq->decoder,
2031 etmq->offset,
2032 &etmq->buf[etmq->buf_used],
2033 etmq->buf_len,
2034 &processed);
2035 if (ret)
2036 goto out;
2037
2038 etmq->offset += processed;
2039 etmq->buf_used += processed;
2040 etmq->buf_len -= processed;
2041
2042out:
2043 return ret;
2044}
2045
/*
 * Drain the packet queue associated with @tidq, dispatching each packet
 * to the appropriate sample/exception/flush handler.
 */
static int cs_etm__process_traceid_queue(struct cs_etm_queue *etmq,
					 struct cs_etm_traceid_queue *tidq)
{
	int ret;
	struct cs_etm_packet_queue *packet_queue;

	packet_queue = &tidq->packet_queue;

	/* Process each packet in this chunk */
	while (1) {
		ret = cs_etm_decoder__get_packet(packet_queue,
						 tidq->packet);
		if (ret <= 0)
			/*
			 * Stop processing this chunk on error or when
			 * there is nothing left in the queue.
			 */
			break;

		/*
		 * Set sample flags before the handlers below swap
		 * packet/prev_packet, so the flags land on the packets
		 * they describe.
		 *
		 * NOTE(review): return values of the handlers below are
		 * deliberately ignored so one bad packet does not abort
		 * the whole chunk — confirm this is the intended policy.
		 */
		ret = cs_etm__set_sample_flags(etmq, tidq);
		if (ret < 0)
			break;

		switch (tidq->packet->sample_type) {
		case CS_ETM_RANGE:
			/*
			 * If the packet contains an instruction range,
			 * generate instruction/branch samples for it.
			 */
			cs_etm__sample(etmq, tidq);
			break;
		case CS_ETM_EXCEPTION:
		case CS_ETM_EXCEPTION_RET:
			/*
			 * If an exception packet arrives, make sure the
			 * previous instruction range packet is handled
			 * properly (forced branch boundary).
			 */
			cs_etm__exception(tidq);
			break;
		case CS_ETM_DISCONTINUITY:
			/*
			 * Discontinuity in trace: flush previous branch
			 * stack and pending samples.
			 */
			cs_etm__flush(etmq, tidq);
			break;
		case CS_ETM_EMPTY:
			/*
			 * Should not receive empty packets; report the
			 * error and bail out.
			 */
			pr_err("CS ETM Trace: empty packet\n");
			return -EINVAL;
		default:
			break;
		}
	}

	return ret;
}
2115
2116static void cs_etm__clear_all_traceid_queues(struct cs_etm_queue *etmq)
2117{
2118 int idx;
2119 struct int_node *inode;
2120 struct cs_etm_traceid_queue *tidq;
2121 struct intlist *traceid_queues_list = etmq->traceid_queues_list;
2122
2123 intlist__for_each_entry(inode, traceid_queues_list) {
2124 idx = (int)(intptr_t)inode->priv;
2125 tidq = etmq->traceid_queues[idx];
2126
2127
2128 cs_etm__process_traceid_queue(etmq, tidq);
2129
2130
2131
2132
2133
2134 cs_etm__flush(etmq, tidq);
2135 }
2136}
2137
/*
 * Per-thread decode loop: fetch data blocks one by one, decode each
 * fully, and synthesize samples from the resulting packets.  Returns 0
 * when the data feed is exhausted, or a negative error code.
 */
static int cs_etm__run_decoder(struct cs_etm_queue *etmq)
{
	int err = 0;
	struct cs_etm_traceid_queue *tidq;

	tidq = cs_etm__etmq_get_traceid_queue(etmq, CS_ETM_PER_THREAD_TRACEID);
	if (!tidq)
		return -EINVAL;

	/* Go through each buffer in the queue and decode them one by one */
	while (1) {
		err = cs_etm__get_data_block(etmq);
		if (err <= 0)
			return err;

		/* Run trace decoder until buffer consumed or end of trace */
		do {
			err = cs_etm__decode_data_block(etmq);
			if (err)
				return err;

			/*
			 * Process each packet in this chunk; on error there
			 * is nothing to do but hope the next chunk fares
			 * better.
			 */
			err = cs_etm__process_traceid_queue(etmq, tidq);

		} while (etmq->buf_len);

		if (err == 0)
			/* Flush any remaining branch stack entries */
			err = cs_etm__end_block(etmq, tidq);
	}

	/* Unreachable: the loop above always returns. */
	return err;
}
2175
2176static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
2177 pid_t tid)
2178{
2179 unsigned int i;
2180 struct auxtrace_queues *queues = &etm->queues;
2181
2182 for (i = 0; i < queues->nr_queues; i++) {
2183 struct auxtrace_queue *queue = &etm->queues.queue_array[i];
2184 struct cs_etm_queue *etmq = queue->priv;
2185 struct cs_etm_traceid_queue *tidq;
2186
2187 if (!etmq)
2188 continue;
2189
2190 tidq = cs_etm__etmq_get_traceid_queue(etmq,
2191 CS_ETM_PER_THREAD_TRACEID);
2192
2193 if (!tidq)
2194 continue;
2195
2196 if ((tid == -1) || (tidq->tid == tid)) {
2197 cs_etm__set_pid_tid_cpu(etm, tidq);
2198 cs_etm__run_decoder(etmq);
2199 }
2200 }
2201
2202 return 0;
2203}
2204
/*
 * Timed decode loop: process all queues in global timestamp order using
 * a min heap keyed on the next timestamp available from each queue.
 */
static int cs_etm__process_queues(struct cs_etm_auxtrace *etm)
{
	int ret = 0;
	unsigned int cs_queue_nr, queue_nr, i;
	u8 trace_chan_id;
	u64 cs_timestamp;
	struct auxtrace_queue *queue;
	struct cs_etm_queue *etmq;
	struct cs_etm_traceid_queue *tidq;

	/*
	 * Pre-populate the heap with one timestamp entry from each queue
	 * so that processing can start in time order.
	 */
	for (i = 0; i < etm->queues.nr_queues; i++) {
		etmq = etm->queues.queue_array[i].priv;
		if (!etmq)
			continue;

		ret = cs_etm__queue_first_cs_timestamp(etm, etmq, i);
		if (ret)
			return ret;
	}

	while (1) {
		if (!etm->heap.heap_cnt)
			goto out;

		/* Take the entry at the top of the min heap */
		cs_queue_nr = etm->heap.heap_array[0].queue_nr;
		queue_nr = TO_QUEUE_NR(cs_queue_nr);
		trace_chan_id = TO_TRACE_CHAN_ID(cs_queue_nr);
		queue = &etm->queues.queue_array[queue_nr];
		etmq = queue->priv;

		/*
		 * Remove the top entry from the heap since it is about to
		 * be processed.
		 */
		auxtrace_heap__pop(&etm->heap);

		tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
		if (!tidq) {
			/*
			 * No traceid queue has been allocated for this
			 * trace ID, which means something somewhere went
			 * very wrong.  No choice but to bail out.
			 */
			ret = -EINVAL;
			goto out;
		}

		/*
		 * Packets associated with this timestamp are already in
		 * the traceid queue; process them now.
		 */
		ret = cs_etm__process_traceid_queue(etmq, tidq);
		if (ret < 0)
			goto out;

		/*
		 * Packets for this timestamp have been processed; move on
		 * to the next timestamp, fetching a new data block if
		 * need be.
		 */
refetch:
		ret = cs_etm__get_data_block(etmq);
		if (ret < 0)
			goto out;

		/*
		 * No more data to process in this etmq; simply move on to
		 * another entry in the heap.
		 */
		if (!ret)
			continue;

		ret = cs_etm__decode_data_block(etmq);
		if (ret)
			goto out;

		cs_timestamp = cs_etm__etmq_get_timestamp(etmq, &trace_chan_id);

		if (!cs_timestamp) {
			/*
			 * cs_etm__decode_data_block() returns when there is
			 * no more trace to decode in the current buffer OR
			 * when a timestamp has been encountered on any of
			 * the traceid queues.  Since no timestamp was seen,
			 * there is no more trace in this buffer: drain and
			 * flush all traceid queues, then fetch another
			 * buffer for this etmq.
			 */
			cs_etm__clear_all_traceid_queues(etmq);

			goto refetch;
		}

		/*
		 * Push the timestamp of the packets just decoded onto the
		 * min heap; they will be processed and synthesized on the
		 * next visit to this queue/trace ID.
		 */
		cs_queue_nr = TO_CS_QUEUE_NR(queue_nr, trace_chan_id);
		ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, cs_timestamp);
	}

out:
	return ret;
}
2317
2318static int cs_etm__process_itrace_start(struct cs_etm_auxtrace *etm,
2319 union perf_event *event)
2320{
2321 struct thread *th;
2322
2323 if (etm->timeless_decoding)
2324 return 0;
2325
2326
2327
2328
2329
2330 th = machine__findnew_thread(etm->machine,
2331 event->itrace_start.pid,
2332 event->itrace_start.tid);
2333 if (!th)
2334 return -ENOMEM;
2335
2336 thread__put(th);
2337
2338 return 0;
2339}
2340
2341static int cs_etm__process_switch_cpu_wide(struct cs_etm_auxtrace *etm,
2342 union perf_event *event)
2343{
2344 struct thread *th;
2345 bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
2346
2347
2348
2349
2350
2351 if (etm->timeless_decoding)
2352 return 0;
2353
2354
2355
2356
2357
2358
2359 if (!out)
2360 return 0;
2361
2362
2363
2364
2365
2366 th = machine__findnew_thread(etm->machine,
2367 event->context_switch.next_prev_pid,
2368 event->context_switch.next_prev_tid);
2369 if (!th)
2370 return -ENOMEM;
2371
2372 thread__put(th);
2373
2374 return 0;
2375}
2376
/*
 * Session event hook: react to the perf events that affect CoreSight
 * decoding (EXIT in timeless mode, ITRACE_START, SWITCH_CPU_WIDE, AUX).
 */
static int cs_etm__process_event(struct perf_session *session,
				 union perf_event *event,
				 struct perf_sample *sample,
				 struct perf_tool *tool)
{
	u64 sample_kernel_timestamp;
	struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);

	if (dump_trace)
		return 0;

	if (!tool->ordered_events) {
		pr_err("CoreSight ETM Trace requires ordered events\n");
		return -EINVAL;
	}

	/* (u64)-1 marks an unset sample time */
	if (sample->time && (sample->time != (u64) -1))
		sample_kernel_timestamp = sample->time;
	else
		sample_kernel_timestamp = 0;

	/*
	 * In timeless (per-thread) mode, decode a thread's trace when it
	 * exits since no more trace will arrive for it after that point.
	 */
	if (etm->timeless_decoding &&
	    event->header.type == PERF_RECORD_EXIT)
		return cs_etm__process_timeless_queues(etm,
						       event->fork.tid);

	if (event->header.type == PERF_RECORD_ITRACE_START)
		return cs_etm__process_itrace_start(etm, event);
	else if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
		return cs_etm__process_switch_cpu_wide(etm, event);

	if (!etm->timeless_decoding && event->header.type == PERF_RECORD_AUX) {
		/*
		 * Record the latest kernel timestamp available in the header
		 * so synthesized samples can occur from this point onwards.
		 */
		etm->latest_kernel_timestamp = sample_kernel_timestamp;
	}

	return 0;
}
2426
2427static void dump_queued_data(struct cs_etm_auxtrace *etm,
2428 struct perf_record_auxtrace *event)
2429{
2430 struct auxtrace_buffer *buf;
2431 unsigned int i;
2432
2433
2434
2435
2436
2437 for (i = 0; i < etm->queues.nr_queues; ++i)
2438 list_for_each_entry(buf, &etm->queues.queue_array[i].head, list)
2439 if (buf->reference == event->reference)
2440 cs_etm__dump_event(etm->queues.queue_array[i].priv, buf);
2441}
2442
/*
 * Handle a PERF_RECORD_AUXTRACE event: queue its buffer (piped mode or
 * when nothing was queued up front) and optionally dump its contents.
 */
static int cs_etm__process_auxtrace_event(struct perf_session *session,
					  union perf_event *event,
					  struct perf_tool *tool __maybe_unused)
{
	struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);
	if (!etm->data_queued) {
		struct auxtrace_buffer *buffer;
		off_t data_offset;
		int fd = perf_data__fd(session->data);
		bool is_pipe = perf_data__is_pipe(session->data);
		int err;
		int idx = event->auxtrace.idx;

		if (is_pipe)
			data_offset = 0;
		else {
			/* Trace data immediately follows the event record */
			data_offset = lseek(fd, 0, SEEK_CUR);
			if (data_offset == -1)
				return -errno;
		}

		err = auxtrace_queues__add_event(&etm->queues, session,
						 event, data_offset, &buffer);
		if (err)
			return err;

		/*
		 * Knowing if the trace is formatted or not requires a lookup
		 * of the aux record, so that only works in non-piped mode
		 * where data is queued in cs_etm__queue_aux_records().
		 * Always assume formatted in piped mode (true).
		 */
		err = cs_etm__setup_queue(etm, &etm->queues.queue_array[idx],
					  idx, true);
		if (err)
			return err;

		if (dump_trace)
			if (auxtrace_buffer__get_data(buffer, fd)) {
				cs_etm__dump_event(etm->queues.queue_array[idx].priv, buffer);
				auxtrace_buffer__put_data(buffer);
			}
	} else if (dump_trace)
		dump_queued_data(etm, &event->auxtrace);

	return 0;
}
2492
2493static bool cs_etm__is_timeless_decoding(struct cs_etm_auxtrace *etm)
2494{
2495 struct evsel *evsel;
2496 struct evlist *evlist = etm->session->evlist;
2497 bool timeless_decoding = true;
2498
2499
2500 if (etm->synth_opts.timeless_decoding)
2501 return true;
2502
2503
2504
2505
2506
2507 evlist__for_each_entry(evlist, evsel) {
2508 if ((evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
2509 timeless_decoding = false;
2510 }
2511
2512 return timeless_decoding;
2513}
2514
/* Format strings for dumping the global auxtrace_info header block */
static const char * const cs_etm_global_header_fmts[] = {
	[CS_HEADER_VERSION]	= "	Header version		       %llx\n",
	[CS_PMU_TYPE_CPUS]	= "	PMU type/num cpus	       %llx\n",
	[CS_ETM_SNAPSHOT]	= "	Snapshot		       %llx\n",
};

/* Format strings for dumping a per-cpu ETMv3 metadata block */
static const char * const cs_etm_priv_fmts[] = {
	[CS_ETM_MAGIC]		= "	Magic number	       %llx\n",
	[CS_ETM_CPU]		= "	CPU		       %lld\n",
	[CS_ETM_NR_TRC_PARAMS]	= "	NR_TRC_PARAMS	       %llx\n",
	[CS_ETM_ETMCR]		= "	ETMCR		       %llx\n",
	[CS_ETM_ETMTRACEIDR]	= "	ETMTRACEIDR	       %llx\n",
	[CS_ETM_ETMCCER]	= "	ETMCCER		       %llx\n",
	[CS_ETM_ETMIDR]		= "	ETMIDR		       %llx\n",
};

/* Format strings for dumping a per-cpu ETMv4/ETE metadata block */
static const char * const cs_etmv4_priv_fmts[] = {
	[CS_ETM_MAGIC]		= "	Magic number	       %llx\n",
	[CS_ETM_CPU]		= "	CPU		       %lld\n",
	[CS_ETM_NR_TRC_PARAMS]	= "	NR_TRC_PARAMS	       %llx\n",
	[CS_ETMV4_TRCCONFIGR]	= "	TRCCONFIGR	       %llx\n",
	[CS_ETMV4_TRCTRACEIDR]	= "	TRCTRACEIDR	       %llx\n",
	[CS_ETMV4_TRCIDR0]	= "	TRCIDR0		       %llx\n",
	[CS_ETMV4_TRCIDR1]	= "	TRCIDR1		       %llx\n",
	[CS_ETMV4_TRCIDR2]	= "	TRCIDR2		       %llx\n",
	[CS_ETMV4_TRCIDR8]	= "	TRCIDR8		       %llx\n",
	[CS_ETMV4_TRCAUTHSTATUS] = "	TRCAUTHSTATUS	       %llx\n",
	[CS_ETE_TRCDEVARCH]	= "	TRCDEVARCH	       %llx\n"
};

/* Fallbacks for parameters/magic numbers this build doesn't know about */
static const char * const param_unk_fmt =
	"	Unknown parameter [%d]	       %llx\n";
static const char * const magic_unk_fmt =
	"	Magic number Unknown	       %llx\n";
2549
/*
 * Dump one per-cpu metadata block of a version 0 auxtrace_info record.
 * Advances *offset past the block on success; returns -EINVAL on an
 * unrecognized magic number.
 */
static int cs_etm__print_cpu_metadata_v0(__u64 *val, int *offset)
{
	int i = *offset, j, nr_params = 0, fmt_offset;
	__u64 magic;

	/* check magic value */
	magic = val[i + CS_ETM_MAGIC];
	if ((magic != __perf_cs_etmv3_magic) &&
	    (magic != __perf_cs_etmv4_magic)) {
		/* failure - note bad magic value */
		fprintf(stdout, magic_unk_fmt, magic);
		return -EINVAL;
	}

	/* print common header block */
	fprintf(stdout, cs_etm_priv_fmts[CS_ETM_MAGIC], val[i++]);
	fprintf(stdout, cs_etm_priv_fmts[CS_ETM_CPU], val[i++]);

	if (magic == __perf_cs_etmv3_magic) {
		nr_params = CS_ETM_NR_TRC_PARAMS_V0;
		fmt_offset = CS_ETM_ETMCR;
		/* after the common block, print the fixed v0 ETMv3 params */
		for (j = fmt_offset; j < nr_params + fmt_offset; j++, i++)
			fprintf(stdout, cs_etm_priv_fmts[j], val[i]);
	} else if (magic == __perf_cs_etmv4_magic) {
		nr_params = CS_ETMV4_NR_TRC_PARAMS_V0;
		fmt_offset = CS_ETMV4_TRCCONFIGR;
		/* after the common block, print the fixed v0 ETMv4 params */
		for (j = fmt_offset; j < nr_params + fmt_offset; j++, i++)
			fprintf(stdout, cs_etmv4_priv_fmts[j], val[i]);
	}
	*offset = i;
	return 0;
}
2584
/*
 * Dump one per-cpu metadata block of a version 1 auxtrace_info record.
 * V1 blocks carry their own parameter count, so there may be more or
 * fewer parameters than this build knows names for.
 */
static int cs_etm__print_cpu_metadata_v1(__u64 *val, int *offset)
{
	int i = *offset, j, total_params = 0;
	__u64 magic;

	magic = val[i + CS_ETM_MAGIC];
	/* total params to print is NR_PARAMS + common block size for v1 */
	total_params = val[i + CS_ETM_NR_TRC_PARAMS] + CS_ETM_COMMON_BLK_MAX_V1;

	if (magic == __perf_cs_etmv3_magic) {
		for (j = 0; j < total_params; j++, i++) {
			/* if the file has more params than we know about, dump raw */
			if (j >= CS_ETM_PRIV_MAX)
				fprintf(stdout, param_unk_fmt, j, val[i]);
			else
				fprintf(stdout, cs_etm_priv_fmts[j], val[i]);
		}
	} else if (magic == __perf_cs_etmv4_magic || magic == __perf_cs_ete_magic) {
		/*
		 * ETE and ETMv4 can be printed in the same block because
		 * the number of parameters is saved and they share the
		 * list of parameter names.  ETE is also only supported in
		 * V1 files.
		 */
		for (j = 0; j < total_params; j++, i++) {
			/* if the file has more params than we know about, dump raw */
			if (j >= CS_ETE_PRIV_MAX)
				fprintf(stdout, param_unk_fmt, j, val[i]);
			else
				fprintf(stdout, cs_etmv4_priv_fmts[j], val[i]);
		}
	} else {
		/* failure - note bad magic value and fail out */
		fprintf(stdout, magic_unk_fmt, magic);
		return -EINVAL;
	}
	*offset = i;
	return 0;
}
2623
2624static void cs_etm__print_auxtrace_info(__u64 *val, int num)
2625{
2626 int i, cpu = 0, version, err;
2627
2628
2629 version = val[0];
2630 if (version > CS_HEADER_CURRENT_VERSION) {
2631
2632 fprintf(stdout, " Unknown Header Version = %x, ", version);
2633 fprintf(stdout, "Version supported <= %x\n", CS_HEADER_CURRENT_VERSION);
2634 return;
2635 }
2636
2637 for (i = 0; i < CS_HEADER_VERSION_MAX; i++)
2638 fprintf(stdout, cs_etm_global_header_fmts[i], val[i]);
2639
2640 for (i = CS_HEADER_VERSION_MAX; cpu < num; cpu++) {
2641 if (version == 0)
2642 err = cs_etm__print_cpu_metadata_v0(val, &i);
2643 else if (version == 1)
2644 err = cs_etm__print_cpu_metadata_v1(val, &i);
2645 if (err)
2646 return;
2647 }
2648}
2649
/*
 * Read a single cpu parameter block from the auxtrace_info priv block
 * and return it as a freshly allocated version-1-shaped metadata array
 * of @out_blk_size entries (caller frees).
 *
 * For version 1 there is a per-cpu nr_params entry; a v1 file may carry
 * fewer, the same, or more params than this build understands.
 *
 * For a version 0 block the parameter count is fixed (@nr_params_v0)
 * and the NR_TRC_PARAMS slot has to be synthesized here.
 */
static u64 *cs_etm__create_meta_blk(u64 *buff_in, int *buff_in_offset,
				    int out_blk_size, int nr_params_v0)
{
	u64 *metadata = NULL;
	int hdr_version;
	int nr_in_params, nr_out_params, nr_cmn_params;
	int i, k;

	metadata = zalloc(sizeof(*metadata) * out_blk_size);
	if (!metadata)
		return NULL;

	/* read block current index & version */
	i = *buff_in_offset;
	hdr_version = buff_in[CS_HEADER_VERSION];

	if (!hdr_version) {
		/* read a version 0 info block into a version 1 metadata block */
		nr_in_params = nr_params_v0;
		metadata[CS_ETM_MAGIC] = buff_in[i + CS_ETM_MAGIC];
		metadata[CS_ETM_CPU] = buff_in[i + CS_ETM_CPU];
		metadata[CS_ETM_NR_TRC_PARAMS] = nr_in_params;
		/* remaining block params land at offset +1 from the source */
		for (k = CS_ETM_COMMON_BLK_MAX_V1 - 1; k < nr_in_params; k++)
			metadata[k + 1] = buff_in[i + k];
		/* version 0 has 2 common params */
		nr_cmn_params = 2;
	} else {
		/* read a version 1 info block - in/out nr_params may differ */
		/* version 1 has 3 common params */
		nr_cmn_params = 3;
		nr_in_params = buff_in[i + CS_ETM_NR_TRC_PARAMS];

		/* if input has more than we can handle, only handle what we can */
		nr_out_params = nr_in_params + nr_cmn_params;
		if (nr_out_params > out_blk_size)
			nr_out_params = out_blk_size;

		/* the metadata block size could be larger than the source */
		for (k = CS_ETM_MAGIC; k < nr_out_params; k++)
			metadata[k] = buff_in[i + k];

		/* record the actual number of params we copied */
		metadata[CS_ETM_NR_TRC_PARAMS] = nr_out_params - nr_cmn_params;
	}

	/* advance the input offset by the number of in-params consumed */
	i += nr_in_params + nr_cmn_params;
	*buff_in_offset = i;
	return metadata;
}
2710
/*
 * Put a fragment of an auxtrace buffer into the auxtrace queues based on
 * the bounds of @aux_event, if it matches the buffer at @file_offset.
 *
 * Normally whole auxtrace buffers would be queued, but the decoder needs
 * to be reset for every PERF_RECORD_AUX event and it is reset across
 * each queued buffer, so splitting the buffers up in advance has the
 * same effect.
 *
 * Returns 0 when the fragment was queued, 1 when this buffer does not
 * match the aux event (keep looking), or a negative error code.
 */
static int cs_etm__queue_aux_fragment(struct perf_session *session, off_t file_offset, size_t sz,
				      struct perf_record_aux *aux_event, struct perf_sample *sample)
{
	int err;
	char buf[PERF_SAMPLE_MAX_SIZE];
	union perf_event *auxtrace_event_union;
	struct perf_record_auxtrace *auxtrace_event;
	union perf_event auxtrace_fragment;
	__u64 aux_offset, aux_size;
	__u32 idx;
	bool formatted;

	struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);

	/*
	 * There should be a PERF_RECORD_AUXTRACE event at the file_offset
	 * obtained from the auxtrace index.
	 */
	err = perf_session__peek_event(session, file_offset, buf,
				       PERF_SAMPLE_MAX_SIZE, &auxtrace_event_union, NULL);
	if (err)
		return err;
	auxtrace_event = &auxtrace_event_union->auxtrace;
	if (auxtrace_event->header.type != PERF_RECORD_AUXTRACE)
		return -EINVAL;

	if (auxtrace_event->header.size < sizeof(struct perf_record_auxtrace) ||
		auxtrace_event->header.size != sz) {
		return -EINVAL;
	}

	/*
	 * In per-thread mode, CPU is set to -1 but TID will be set instead.
	 * Return 'not found' (1) if neither CPU nor TID matches the sample.
	 */
	if ((auxtrace_event->cpu == (__u32) -1 && auxtrace_event->tid != sample->tid) ||
			auxtrace_event->cpu != sample->cpu)
		return 1;

	if (aux_event->flags & PERF_AUX_FLAG_OVERWRITE) {
		/*
		 * Overwrite (wrap-around) mode: only the most recent
		 * aux_size bytes are valid, clamped to the buffer size.
		 */
		aux_size = min(aux_event->aux_size, auxtrace_event->size);

		/*
		 * In this mode aux_offset marks the end of the valid data,
		 * so the fragment starts aux_size bytes before it.
		 */
		aux_offset = aux_event->aux_offset - aux_size;
	} else {
		aux_size = aux_event->aux_size;
		aux_offset = aux_event->aux_offset;
	}

	if (aux_offset >= auxtrace_event->offset &&
	    aux_offset + aux_size <= auxtrace_event->offset + auxtrace_event->size) {
		/*
		 * If this AUX event was inside this buffer somewhere, create
		 * a new auxtrace event based on the aux event's bounds and
		 * queue that fragment.
		 */
		auxtrace_fragment.auxtrace = *auxtrace_event;
		auxtrace_fragment.auxtrace.size = aux_size;
		auxtrace_fragment.auxtrace.offset = aux_offset;
		file_offset += aux_offset - auxtrace_event->offset + auxtrace_event->header.size;

		pr_debug3("CS ETM: Queue buffer size: %#"PRI_lx64" offset: %#"PRI_lx64
			  " tid: %d cpu: %d\n", aux_size, aux_offset, sample->tid, sample->cpu);
		err = auxtrace_queues__add_event(&etm->queues, session, &auxtrace_fragment,
						 file_offset, NULL);
		if (err)
			return err;

		idx = auxtrace_event->idx;
		formatted = !(aux_event->flags & PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW);
		return cs_etm__setup_queue(etm, &etm->queues.queue_array[idx],
					   idx, formatted);
	}

	/* Wasn't inside this buffer, but there were no parse errors. 1 == 'not found' */
	return 1;
}
2807
2808static int cs_etm__queue_aux_records_cb(struct perf_session *session, union perf_event *event,
2809 u64 offset __maybe_unused, void *data __maybe_unused)
2810{
2811 struct perf_sample sample;
2812 int ret;
2813 struct auxtrace_index_entry *ent;
2814 struct auxtrace_index *auxtrace_index;
2815 struct evsel *evsel;
2816 size_t i;
2817
2818
2819 if (event->header.type != PERF_RECORD_AUX)
2820 return 0;
2821
2822 if (event->header.size < sizeof(struct perf_record_aux))
2823 return -EINVAL;
2824
2825
2826 if (!event->aux.aux_size)
2827 return 0;
2828
2829
2830
2831
2832
2833 evsel = evlist__event2evsel(session->evlist, event);
2834 if (!evsel)
2835 return -EINVAL;
2836 ret = evsel__parse_sample(evsel, event, &sample);
2837 if (ret)
2838 return ret;
2839
2840
2841
2842
2843 list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) {
2844 for (i = 0; i < auxtrace_index->nr; i++) {
2845 ent = &auxtrace_index->entries[i];
2846 ret = cs_etm__queue_aux_fragment(session, ent->file_offset,
2847 ent->sz, &event->aux, &sample);
2848
2849
2850
2851
2852 if (ret != 1)
2853 return ret;
2854 }
2855 }
2856
2857
2858
2859
2860
2861 pr_err("CS ETM: Couldn't find auxtrace buffer for aux_offset: %#"PRI_lx64
2862 " tid: %d cpu: %d\n", event->aux.aux_offset, sample.tid, sample.cpu);
2863 return 0;
2864}
2865
2866static int cs_etm__queue_aux_records(struct perf_session *session)
2867{
2868 struct auxtrace_index *index = list_first_entry_or_null(&session->auxtrace_index,
2869 struct auxtrace_index, list);
2870 if (index && index->nr > 0)
2871 return perf_session__peek_events(session, session->header.data_offset,
2872 session->header.data_size,
2873 cs_etm__queue_aux_records_cb, NULL);
2874
2875
2876
2877
2878
2879
2880
2881
2882
2883 return 0;
2884}
2885
/*
 * Set up CoreSight ETM decoding for a session from its
 * PERF_RECORD_AUXTRACE_INFO event: validate the info header, unpack the
 * per-CPU metadata blocks, build the traceID -> metadata lookup list,
 * allocate and wire up the cs_etm_auxtrace instance, synthesize events and
 * queue the recorded AUX buffers.  Returns 0 on success, negative errno on
 * failure (all partially-built state is torn down via the goto chain).
 */
int cs_etm__process_auxtrace_info(union perf_event *event,
				  struct perf_session *session)
{
	struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
	struct cs_etm_auxtrace *etm = NULL;
	struct int_node *inode;
	unsigned int pmu_type;
	int event_header_size = sizeof(struct perf_event_header);
	int info_header_size;
	int total_size = auxtrace_info->header.size;
	int priv_size = 0;
	int num_cpu, trcidr_idx;
	int err = 0;
	int i, j;
	u64 *ptr, *hdr = NULL;
	u64 **metadata = NULL;
	u64 hdr_version;

	/*
	 * 8 bytes of info-specific header follow the perf_event_header;
	 * everything after that is the private (per-CPU metadata) area.
	 * NOTE(review): presumably this covers the 'type' field plus padding
	 * of perf_record_auxtrace_info — confirm against the record layout.
	 */
	info_header_size = 8;

	if (total_size < (event_header_size + info_header_size))
		return -EINVAL;

	priv_size = total_size - event_header_size - info_header_size;

	/* The private area is laid out as an array of u64 words. */
	ptr = (u64 *) auxtrace_info->priv;

	/* First word is the header version; reject anything newer than us. */
	hdr_version = ptr[0];
	if (hdr_version > CS_HEADER_CURRENT_VERSION) {
		/* Dump what we got for diagnosis before bailing out. */
		if (dump_trace)
			cs_etm__print_auxtrace_info(auxtrace_info->priv, 0);
		return -EINVAL;
	}

	hdr = zalloc(sizeof(*hdr) * CS_HEADER_VERSION_MAX);
	if (!hdr)
		return -ENOMEM;

	/* Copy the fixed header words; they precede the per-CPU blocks. */
	for (i = 0; i < CS_HEADER_VERSION_MAX; i++)
		hdr[i] = ptr[i];
	/* CS_PMU_TYPE_CPUS packs the CPU count (low 32) and PMU type (high 32). */
	num_cpu = hdr[CS_PMU_TYPE_CPUS] & 0xffffffff;
	pmu_type = (unsigned int) ((hdr[CS_PMU_TYPE_CPUS] >> 32) &
				      0xffffffff);

	/*
	 * Create the list used to translate a trace stream ID into its
	 * metadata block (each node's priv pointer is set to the matching
	 * metadata[j] below).
	 */
	traceid_list = intlist__new(NULL);
	if (!traceid_list) {
		err = -ENOMEM;
		goto err_free_hdr;
	}

	metadata = zalloc(sizeof(*metadata) * num_cpu);
	if (!metadata) {
		err = -ENOMEM;
		goto err_free_traceid_list;
	}

	/*
	 * Each per-CPU metadata block starts with a magic word identifying
	 * the tracer flavour (ETMv3/PTM, ETMv4 or ETE).
	 * cs_etm__create_meta_blk() copies the block and advances 'i' past
	 * the words it consumed, so the loop walks the private area in order.
	 * trcidr_idx records where the trace ID lives within each flavour's
	 * block layout.
	 */
	for (j = 0; j < num_cpu; j++) {
		if (ptr[i] == __perf_cs_etmv3_magic) {
			metadata[j] =
				cs_etm__create_meta_blk(ptr, &i,
							CS_ETM_PRIV_MAX,
							CS_ETM_NR_TRC_PARAMS_V0);

			/* The traceID is always in the same place for ETMv3. */
			trcidr_idx = CS_ETM_ETMTRACEIDR;

		} else if (ptr[i] == __perf_cs_etmv4_magic) {
			metadata[j] =
				cs_etm__create_meta_blk(ptr, &i,
							CS_ETMV4_PRIV_MAX,
							CS_ETMV4_NR_TRC_PARAMS_V0);

			/* The traceID is always in the same place for ETMv4. */
			trcidr_idx = CS_ETMV4_TRCTRACEIDR;
		} else if (ptr[i] == __perf_cs_ete_magic) {
			metadata[j] = cs_etm__create_meta_blk(ptr, &i, CS_ETE_PRIV_MAX, -1);

			/* ETE shares the ETMv4 trace ID slot. */
			trcidr_idx = CS_ETMV4_TRCTRACEIDR;
		} else {
			ui__error("CS ETM Trace: Unrecognised magic number %#"PRIx64". File could be from a newer version of perf.\n",
				  ptr[i]);
			err = -EINVAL;
			goto err_free_metadata;
		}

		if (!metadata[j]) {
			err = -ENOMEM;
			goto err_free_metadata;
		}

		/* Register this CPU's trace ID in the lookup list. */
		inode = intlist__findnew(traceid_list, metadata[j][trcidr_idx]);

		/* Something went wrong, no need to continue. */
		if (!inode) {
			err = -ENOMEM;
			goto err_free_metadata;
		}

		/*
		 * A non-NULL priv means the same trace ID was already seen
		 * for another CPU; duplicates make streams indistinguishable,
		 * so treat that as invalid input.
		 */
		if (inode->priv) {
			err = -EINVAL;
			goto err_free_metadata;
		}

		/* Hand this CPU's metadata block to the lookup node. */
		inode->priv = metadata[j];
	}

	/*
	 * 'i' counts the u64 words consumed from the private area (fixed
	 * header plus all per-CPU blocks).  If that doesn't account for the
	 * whole area exactly, the record is malformed or from an
	 * incompatible layout.
	 */
	if (i * 8 != priv_size) {
		err = -EINVAL;
		goto err_free_metadata;
	}

	etm = zalloc(sizeof(*etm));

	if (!etm) {
		err = -ENOMEM;
		goto err_free_metadata;
	}

	err = auxtrace_queues__init(&etm->queues);
	if (err)
		goto err_free_etm;

	/* Honour user-supplied itrace options, otherwise take the defaults. */
	if (session->itrace_synth_opts->set) {
		etm->synth_opts = *session->itrace_synth_opts;
	} else {
		itrace_synth_opts__set_default(&etm->synth_opts,
				session->itrace_synth_opts->default_no_sample);
		etm->synth_opts.callchain = false;
	}

	etm->session = session;
	etm->machine = &session->machines.host;

	etm->num_cpu = num_cpu;
	etm->pmu_type = pmu_type;
	etm->snapshot_mode = (hdr[CS_ETM_SNAPSHOT] != 0);
	etm->metadata = metadata;
	etm->auxtrace_type = auxtrace_info->type;
	etm->timeless_decoding = cs_etm__is_timeless_decoding(etm);

	/* Hook this decoder into the session's auxtrace callbacks. */
	etm->auxtrace.process_event = cs_etm__process_event;
	etm->auxtrace.process_auxtrace_event = cs_etm__process_auxtrace_event;
	etm->auxtrace.flush_events = cs_etm__flush_events;
	etm->auxtrace.free_events = cs_etm__free_events;
	etm->auxtrace.free = cs_etm__free;
	etm->auxtrace.evsel_is_auxtrace = cs_etm__evsel_is_auxtrace;
	session->auxtrace = &etm->auxtrace;

	/* Placeholder thread for samples that can't be attributed. */
	etm->unknown_thread = thread__new(999999999, 999999999);
	if (!etm->unknown_thread) {
		err = -ENOMEM;
		goto err_free_queues;
	}

	/*
	 * NOTE(review): initialising the node here presumably keeps list
	 * operations safe when this thread is released via thread__zput()
	 * without ever being added to a machine's thread list — confirm.
	 */
	INIT_LIST_HEAD(&etm->unknown_thread->node);

	err = thread__set_comm(etm->unknown_thread, "unknown", 0);
	if (err)
		goto err_delete_thread;

	if (thread__init_maps(etm->unknown_thread, etm->machine)) {
		err = -ENOMEM;
		goto err_delete_thread;
	}

	if (dump_trace) {
		cs_etm__print_auxtrace_info(auxtrace_info->priv, num_cpu);
	}

	err = cs_etm__synth_events(etm, session);
	if (err)
		goto err_delete_thread;

	err = cs_etm__queue_aux_records(session);
	if (err)
		goto err_delete_thread;

	etm->data_queued = etm->queues.populated;

	/*
	 * Nothing queued means the input didn't allow random access (e.g. a
	 * pipe); decoding continues but without the queued-buffer guarantees.
	 */
	if (!etm->data_queued)
		pr_warning("CS ETM warning: Coresight decode and TRBE support requires random file access.\n"
			   "Continuing with best effort decoding in piped mode.\n\n");

	return 0;

err_delete_thread:
	thread__zput(etm->unknown_thread);
err_free_queues:
	auxtrace_queues__free(&etm->queues);
	session->auxtrace = NULL;
err_free_etm:
	zfree(&etm);
err_free_metadata:
	/* Unfilled slots are NULL; zfree() handles that. */
	for (j = 0; j < num_cpu; j++)
		zfree(&metadata[j]);
	zfree(&metadata);
err_free_traceid_list:
	intlist__delete(traceid_list);
err_free_hdr:
	zfree(&hdr);
	/*
	 * Even on failure the header version was valid, so dump whatever we
	 * parsed to help diagnose the problem.
	 */
	if (dump_trace)
		cs_etm__print_auxtrace_info(auxtrace_info->priv, num_cpu);
	return err;
}
3135